From 80fea327633c39fc6746f6a1e10fc7c6ab222614 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 29 Mar 2026 22:42:54 +0100 Subject: [PATCH 001/123] sketch --- src/core/promise.cxx | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 0b2deda0..07fe25ab 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -257,14 +257,18 @@ struct join_awaitable { constexpr auto await_ready(this join_awaitable self) noexcept -> bool { if (not_null(self.frame)->steals == 0) [[likely]] { - // If no steals then we are the only owner of the parent and we are ready - // to join. Therefore, no need to reset the control block. + // If no steals then we are the only owner of the parent and we are + // ready to join. Therefore, no need to reset the control block. + if (self.frame->is_cancelled()) [[unlikely]] { + // Must unconditionally suspended if cancelled + return false; + } return true; } // TODO: benchmark if including the below check (returning false here) in // multithreaded case helps performance enough to justify the extra - // instructions along the fast path + // instructions along the fast path and complexity // Currently: joins() = k_u16_max - num_joined // Hence: k_u16_max - joins() = num_joined @@ -281,22 +285,25 @@ struct join_awaitable { // We must reset the control block and take the stack. We should never // own the stack at this point because we must have stolen the stack. // For ruther explanation see await_suspend() below. 
- return self.take_stack_and_reset(), true; - } + self.take_stack_and_reset(); + if (self.frame->is_cancelled()) [[unlikely]] { + return false; + } + return true; + } return false; } constexpr auto await_suspend(this join_awaitable self, std::coroutine_handle<> task) noexcept -> coro<> { // Currently self.joins = k_u16_max - num_joined + // // We set joins = self->joins - (k_u16_max - num_steals) // = num_steals - num_joined - + // // Hence joined = k_u16_max - num_joined // k_u16_max - joined = num_joined - LF_ASSUME(self.frame); - // Lemma: // // If a thread is at a join and steals have occurred then the @@ -319,7 +326,14 @@ struct join_awaitable { // We must reset the control block and take the stack. We should never // own the stack at this point because we must have stolen the stack. - return self.take_stack_and_reset(), task; + self.take_stack_and_reset(); + + if (self.frame->is_cancelled()) [[unlikely]] { + // TODO: this needs to sink the exception + return std::noop_coroutine(); + } + + return task; } // Someone else is responsible for running this task. @@ -332,6 +346,7 @@ struct join_awaitable { // in a switch awaitable. In this case we can/must do another self-steal. // return try_self_stealing(); + return std::noop_coroutine(); } From 3acc32a9294c2a858e12dccbf98cfbe6820da883 Mon Sep 17 00:00:00 2001 From: Conor Date: Thu, 9 Apr 2026 22:52:00 +0100 Subject: [PATCH 002/123] spell --- src/core/promise.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 07fe25ab..d655509b 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -260,7 +260,7 @@ struct join_awaitable { // If no steals then we are the only owner of the parent and we are // ready to join. Therefore, no need to reset the control block. 
if (self.frame->is_cancelled()) [[unlikely]] { - // Must unconditionally suspended if cancelled + // Must unconditionally suspended if canceled return false; } return true; From 28a7d5c1d5aebb40c621f2b9b3221a40b2ba2724 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 09:33:11 +0100 Subject: [PATCH 003/123] await suspend --- src/core/promise.cxx | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index d655509b..358dfe05 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -225,6 +225,7 @@ struct awaitable : std::suspend_always { * @brief Pull an exception out of a frame and clean-up the union/allocation. */ template +[[nodiscard]] constexpr auto extract_exception(frame_type *frame) noexcept -> std::exception_ptr { LF_ASSUME(frame->exception_bit); // Should only be called if an exception was thrown. @@ -329,14 +330,12 @@ struct join_awaitable { self.take_stack_and_reset(); if (self.frame->is_cancelled()) [[unlikely]] { - // TODO: this needs to sink the exception - return std::noop_coroutine(); + return self.handle_cancel(); } - return task; } - // Someone else is responsible for running this task. + // We cannot touch *this or dereference self as someone may have resumed already! // We cannot currently own this stack (checking would violate above). 
@@ -350,6 +349,16 @@ struct join_awaitable { return std::noop_coroutine(); } + [[nodiscard]] + constexpr auto handle_cancel(this join_awaitable self) -> coro<> { + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (self.frame->exception_bit) [[unlikely]] { + std::ignore = extract_exception(self.frame); + } + } + return final_suspend(self.frame); + }; + [[noreturn]] constexpr void rethrow_exception(this join_awaitable self) { std::rethrow_exception(extract_exception(self.frame)); From 73241e9db6c8cf9a963dad3e87b790243581c53b Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 09:53:55 +0100 Subject: [PATCH 004/123] in final suspend --- src/core/promise.cxx | 180 +++++++++++++++++++++++-------------------- 1 file changed, 96 insertions(+), 84 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 358dfe05..a9dc96f3 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -31,104 +31,116 @@ using frame_t = frame_type>; template [[nodiscard]] LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { + for (;;) { + // Validate final state + LF_ASSUME(frame); + LF_ASSUME(frame->steals == 0); + LF_ASSUME(frame->joins == k_u16_max); + LF_ASSUME(frame->exception_bit == 0); - // Validate final state - LF_ASSUME(frame->steals == 0); - LF_ASSUME(frame->joins == k_u16_max); - LF_ASSUME(frame->exception_bit == 0); + // Local copies (before we destroy frame) + category const kind = frame->kind; - // Local copies (before we destroy frame) - category const kind = frame->kind; + frame_t *parent = not_null(frame->parent); - frame_t *parent = not_null(frame->parent); + // Before resuming the next (or exiting) we should clean-up the current frame. + // Can't use frame from this point onwards + frame->handle().destroy(); - // Before resuming the next (or exiting) we should clean-up the current frame. 
- // Can't use frame from this point onwards - frame->handle().destroy(); + if (kind == category::call) { + return parent->handle(); + } - if (kind == category::call) { - return parent->handle(); - } + LF_ASSUME(kind == category::fork); - LF_ASSUME(kind == category::fork); + Context &context = get_tls_context(); - Context &context = get_tls_context(); + if (steal_handle last_pushed = context.pop()) { + // No-one stole continuation, we are the exclusive owner of parent -> just keep ripping! + LF_ASSUME(last_pushed == steal_handle{key(), parent}); + // This is not a join point so no state (i.e. counters) is guaranteed. + return parent->handle(); + } - if (steal_handle last_pushed = context.pop()) { - // No-one stole continuation, we are the exclusive owner of parent -> just keep ripping! - LF_ASSUME(last_pushed == steal_handle{key(), parent}); - // This is not a join point so no state (i.e. counters) is guaranteed. - return parent->handle(); - } + // An owner is a worker who: + // + // - Created the task. + // - OR had the task submitted to them. + // - OR won the task at a join. + // + // An owner of a task owns the stack the task is on. + // + // As the worker who completed the child task this thread owns the stack the child task was on. + // + // Either: + // + // 1. The parent is on the same stack as the child. + // 2. OR the parent is on a different stack to the child. + // + // Case (1) implies: we owned the parent; forked the child task; then the parent was then stolen. + // Case (2) implies: we stole the parent task; then forked the child; then the parent was stolen. + // + // Case (2) implies that our stack is empty. + + // As soon as we do the `fetch_sub` below the parent task is no longer safe + // to access as it may be resumed and then destroyed by another thread. Hence + // we must make copies on-the-stack of any data we may need if we lose the + // join race. 
+ bool const owner = parent->stack_ckpt == context.stack().checkpoint(); + + // TODO: we could reduce branching if we unconditionally release and also + // drop pre-release function altogether... Need to benchmark with code that + // triggers a lot of stealing. + + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + auto release_key = context.stack().prepare_release(); + + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. + + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); - // An owner is a worker who: - // - // - Created the task. - // - OR had the task submitted to them. - // - OR won the task at a join. - // - // An owner of a task owns the stack the task is on. - // - // As the worker who completed the child task this thread owns the stack the child task was on. - // - // Either: - // - // 1. The parent is on the same stack as the child. - // 2. OR the parent is on a different stack to the child. - // - // Case (1) implies: we owned the parent; forked the child task; then the parent was then stolen. - // Case (2) implies: we stole the parent task; then forked the child; then the parent was stolen. - // - // Case (2) implies that our stack is empty. - - // As soon as we do the `fetch_sub` below the parent task is no longer safe - // to access as it may be resumed and then destroyed by another thread. Hence - // we must make copies on-the-stack of any data we may need if we lose the - // join race. 
- bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - - // TODO: we could reduce branching if we unconditionally release and also - // drop pre-release function altogether... Need to benchmark with code that - // triggers a lot of stealing. - - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. - auto release_key = context.stack().prepare_release(); - - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. - - // Register with parent we have completed this child task. - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. - std::atomic_thread_fence(std::memory_order_acquire); - - if (!owner) { - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); - } + if (!owner) { + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } - // Must reset parent's control block before resuming parent. - parent->reset_counters(); + // Must reset parent's control block before resuming parent. + parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } + } + frame = parent; + continue; + } + return parent->handle(); + } - return parent->handle(); - } + // We did not win the join-race, we cannot dereference the parent pointer now + // as the frame may now be freed by the winner. Parent has not reached join + // or we are not the last child to complete. 
We are now out of jobs, we must + // yield to the executor. - // We did not win the join-race, we cannot dereference the parent pointer now - // as the frame may now be freed by the winner. Parent has not reached join - // or we are not the last child to complete. We are now out of jobs, we must - // yield to the executor. + if (owner) { + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. + context.stack().release(std::move(release_key)); + } - if (owner) { - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. - context.stack().release(std::move(release_key)); + // Else, case (2), our stack has no allocations on it, it may be used later. + return std::noop_coroutine(); } - - // Else, case (2), our stack has no allocations on it, it may be used later. - return std::noop_coroutine(); } struct final_awaitable : std::suspend_always { From 22c6a4d80ab47aec4936c46be59982c9d515211f Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 09:54:58 +0100 Subject: [PATCH 005/123] todo --- src/core/promise.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index a9dc96f3..628c9af3 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -121,6 +121,7 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - std::ignore = extract_exception(parent); } } + // TODO: if the parent is a root task then we need to run a different final_suspend frame = parent; continue; } From 2ea78218fb589eea4dba3104241a14b572c3fb9c Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 10:06:22 +0100 Subject: [PATCH 006/123] root declares what it is --- src/core/frame.cxx | 1 + src/core/schedule.cxx | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/core/frame.cxx b/src/core/frame.cxx index f9022dde..ddff6627 100644 --- 
a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -21,6 +21,7 @@ struct cancellation { export enum class category : std::uint8_t { call = 0, fork, + root, }; export struct frame_base {}; diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index 18e4aca6..2deb8f62 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -73,6 +73,8 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_tframe.kind = category::root; + LF_TRY { sch.post(sched_handle{key(), &task.promise->frame}); // If ^ didn't throw then the root_task will destroy itself at the final suspend. From cb49034eb41e2ac7fa9bea9916dac014ecaf7444 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 10:18:15 +0100 Subject: [PATCH 007/123] final suspend loop on cancel --- src/core/promise.cxx | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 628c9af3..5c7eaee9 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -34,6 +34,7 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - for (;;) { // Validate final state LF_ASSUME(frame); + LF_ASSUME(frame->kind != category::root); LF_ASSUME(frame->steals == 0); LF_ASSUME(frame->joins == k_u16_max); LF_ASSUME(frame->exception_bit == 0); @@ -51,7 +52,10 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - return parent->handle(); } + // Given we are not a call we must be a fork hence, our + // parent can't be a root as they can only call. 
LF_ASSUME(kind == category::fork); + LF_ASSUME(parent->kind != category::root); Context &context = get_tls_context(); @@ -121,10 +125,10 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - std::ignore = extract_exception(parent); } } - // TODO: if the parent is a root task then we need to run a different final_suspend frame = parent; continue; } + return parent->handle(); } @@ -370,7 +374,7 @@ struct join_awaitable { } } return final_suspend(self.frame); - }; + } [[noreturn]] constexpr void rethrow_exception(this join_awaitable self) { From 5f67646bf8c53cf5de0620605f70f2e6a0b39c0d Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 10:47:08 +0100 Subject: [PATCH 008/123] restore assert --- src/core/promise.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 5c7eaee9..f9eda364 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -333,6 +333,8 @@ struct join_awaitable { // that had it's continuation stolen, where it would have had to release // the stack, because the parent was at not at the join. 
+ LF_ASSUME(self.frame); + std::uint32_t steals = self.frame->steals; std::uint32_t offset = k_u16_max - steals; std::uint32_t joined = self.frame->atomic_joins().fetch_sub(offset, std::memory_order_release); From a2af5da6e60071f1b109b449d361e5c2380d558f Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 10:48:47 +0100 Subject: [PATCH 009/123] remove pre-suspend check --- src/core/promise.cxx | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index f9eda364..c3c324ad 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -283,33 +283,6 @@ struct join_awaitable { } return true; } - - // TODO: benchmark if including the below check (returning false here) in - // multithreaded case helps performance enough to justify the extra - // instructions along the fast path and complexity - - // Currently: joins() = k_u16_max - num_joined - // Hence: k_u16_max - joins() = num_joined - - // Could use (relaxed here) + (fence(acquire) in truthy branch) but, it's - // better if we see all the decrements to joins() and avoid suspending the - // coroutine if possible. Cannot fetch_sub() here and write to frame as - // coroutine must be suspended first. - - std::uint32_t steals = self.frame->steals; - std::uint32_t joined = k_u16_max - self.frame->atomic_joins().load(std::memory_order_acquire); - - if (steals == joined) { - // We must reset the control block and take the stack. We should never - // own the stack at this point because we must have stolen the stack. - // For ruther explanation see await_suspend() below. 
- self.take_stack_and_reset(); - - if (self.frame->is_cancelled()) [[unlikely]] { - return false; - } - return true; - } return false; } From 81fb23b71407c050b51b936424f29f1424654f74 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 11:02:16 +0100 Subject: [PATCH 010/123] owning parent --- src/core/frame.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/frame.cxx b/src/core/frame.cxx index ddff6627..7a12d701 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -11,7 +11,7 @@ namespace lf { // =================== Cancellation =================== // struct cancellation { - cancellation *parent = nullptr; + std::unique_ptr parent; std::atomic stop = 0; }; @@ -59,7 +59,7 @@ struct frame_type : frame_base { [[nodiscard]] constexpr auto is_cancelled() const noexcept -> bool { // TODO: Should exception trigger cancellation? - for (cancellation *ptr = cancel; ptr != nullptr; ptr = ptr->parent) { + for (cancellation *ptr = cancel; ptr != nullptr; ptr = ptr->parent.get()) { // TODO: if users can't use cancellation outside of fork-join // then this can be relaxed if (ptr->stop.load(std::memory_order_acquire) == 1) { From 577bfec3ef981096c08269ce8c352cafb5b39b68 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 11:10:12 +0100 Subject: [PATCH 011/123] return adr abstraction --- src/core/ops.cxx | 28 +++++++++++++++------------- src/core/promise.cxx | 2 +- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index c178b1f2..64b74ed3 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -11,17 +11,19 @@ import :frame; namespace lf { +template +struct maybe_ptr { + T *ptr; +}; + +template <> +struct maybe_ptr {}; + // clang-format off template struct [[nodiscard("You should immediately co_await this!")]] pkg { - R *return_address; - [[no_unique_address]] Fn fn; - [[no_unique_address]] tuple args; -}; - -template -struct [[nodiscard("You should immediately co_await this!")]] 
pkg { + [[no_unique_address]] maybe_ptr maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; }; @@ -62,29 +64,29 @@ struct scope { template Fn> static constexpr auto fork(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.return_address = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto call(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.return_address = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } }; diff --git a/src/core/promise.cxx b/src/core/promise.cxx index c3c324ad..5f0f71c6 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -426,7 +426,7 @@ struct mixin_frame { // TODO: tests for null path if constexpr (!std::is_void_v) { - child_promise->return_address = pkg.return_address; + child_promise->return_address = pkg.maybe_ret_adr.ptr; } else if constexpr (!std::is_void_v) { // 
Set child's return address to null to inhibit the return // TODO: add test for this From 80869f4ec21bf2f28e333d7e4961322c2960dc60 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 11:44:25 +0100 Subject: [PATCH 012/123] Revert "owning parent" This reverts commit 81fb23b71407c050b51b936424f29f1424654f74. --- src/core/frame.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/frame.cxx b/src/core/frame.cxx index 7a12d701..ddff6627 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -11,7 +11,7 @@ namespace lf { // =================== Cancellation =================== // struct cancellation { - std::unique_ptr parent; + cancellation *parent = nullptr; std::atomic stop = 0; }; @@ -59,7 +59,7 @@ struct frame_type : frame_base { [[nodiscard]] constexpr auto is_cancelled() const noexcept -> bool { // TODO: Should exception trigger cancellation? - for (cancellation *ptr = cancel; ptr != nullptr; ptr = ptr->parent.get()) { + for (cancellation *ptr = cancel; ptr != nullptr; ptr = ptr->parent) { // TODO: if users can't use cancellation outside of fork-join // then this can be relaxed if (ptr->stop.load(std::memory_order_acquire) == 1) { From d7a175f6b71761ac8ded76f35554e436f7d572c9 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 11:51:53 +0100 Subject: [PATCH 013/123] extra template param in pkg --- src/core/ops.cxx | 31 ++++++++++++++++++++++++++++--- src/core/promise.cxx | 8 ++++---- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 64b74ed3..bc15a126 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -21,8 +21,9 @@ struct maybe_ptr {}; // clang-format off -template +template struct [[nodiscard("You should immediately co_await this!")]] pkg { + [[no_unique_address]] maybe_ptr> maybe_cancel; [[no_unique_address]] maybe_ptr maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; @@ -55,12 +56,20 @@ struct scope { // TODO: Is it better to 
stores values for some types i.e. empty template - using call_pkg = pkg; + using call_pkg = pkg; template - using fork_pkg = pkg; + using fork_pkg = pkg; + + template + using call_cancel_pkg = pkg; + + template + using fork_cancel_pkg = pkg; public: + // === Fork no-cancel === // + template Fn> static constexpr auto fork(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_pkg { @@ -75,6 +84,22 @@ struct scope { return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } + // === Fork with-cancel === // + + // template Fn> + // static constexpr auto + // fork(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + // return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + // } + // template Fn> + // static constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + // return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + // } + // template Fn> + // static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + // return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + // } + template Fn> static constexpr auto call(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> call_pkg { diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 5f0f71c6..cb7f3fec 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -401,8 +401,8 @@ struct mixin_frame { // --- Await transformations - template - static constexpr auto await_transform_pkg(pkg &&pkg) noexcept( + template + static constexpr auto await_transform_pkg(pkg &&pkg) noexcept( async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct @@ -436,8 +436,8 @@ struct mixin_frame { return {.child = &child_promise->frame}; } - template - constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept + template + constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept -> awaitable { LF_TRY { return 
self.await_transform_pkg(std::move(pkg)); From d784a41b964fec726adf1cb518ed2e6efaf5d919 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 12:14:49 +0100 Subject: [PATCH 014/123] op overloads --- src/core/ops.cxx | 49 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index bc15a126..4bb964c4 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -67,6 +67,8 @@ struct scope { template using fork_cancel_pkg = pkg; + using cancel_t = cancellation *; + public: // === Fork no-cancel === // @@ -86,19 +88,23 @@ struct scope { // === Fork with-cancel === // - // template Fn> - // static constexpr auto - // fork(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - // return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - // } - // template Fn> - // static constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - // return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - // } - // template Fn> - // static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - // return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - // } + template Fn> + static constexpr auto + fork(cancel_t ptr, std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } + template Fn> + static constexpr auto + fork(cancel_t ptr, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } + template Fn> + static constexpr auto + fork(cancel_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } + + // === Call no-cancel === // template 
Fn> static constexpr auto @@ -113,6 +119,23 @@ struct scope { static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } + // === Call with-cancel === // + + template Fn> + static constexpr auto + call(cancel_t ptr, std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } + template Fn> + static constexpr auto + call(cancel_t ptr, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } + template Fn> + static constexpr auto + call(cancel_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } }; // TODO: do we want join a member of scope? From 9facc5c6772099841d904c3f4ef30e5bcb568579 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 12:15:48 +0100 Subject: [PATCH 015/123] check return is non null --- src/core/promise.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index cb7f3fec..d41cb161 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -426,7 +426,7 @@ struct mixin_frame { // TODO: tests for null path if constexpr (!std::is_void_v) { - child_promise->return_address = pkg.maybe_ret_adr.ptr; + child_promise->return_address = not_null(pkg.maybe_ret_adr.ptr); } else if constexpr (!std::is_void_v) { // Set child's return address to null to inhibit the return // TODO: add test for this From 194101d013d0ab8cb74535234c5222e629f06bf3 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 12:19:51 +0100 Subject: [PATCH 016/123] todo --- src/core/ops.cxx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/core/ops.cxx b/src/core/ops.cxx 
index 4bb964c4..8328fd95 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -119,8 +119,11 @@ struct scope { static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } + // === Call with-cancel === // + // TODO: explicitly = delte overloads with cancel ptr = std::nullptr_t to avoid mistakes? + template Fn> static constexpr auto call(cancel_t ptr, std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { From 3926434abd4753d37d596ab06ff40bb7607497d6 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 12:26:49 +0100 Subject: [PATCH 017/123] cancel propagation --- src/core/promise.cxx | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index d41cb161..03af0008 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -175,7 +175,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat == category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -204,14 +204,26 @@ struct awaitable : std::suspend_always { return parent; } - if (parent.promise().frame.is_cancelled()) [[unlikely]] { - // Noop if canceled, must clean-up the child that will never be resumed. - return self.child->handle().destroy(), parent; + // Noop if canceled, must clean-up the child that will never be resumed. 
+ if constexpr (Cancel) { + // + if (self.child->is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } + } else { + if (parent.promise().frame.is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } } // Propagate parent->child relationships self.child->parent = &parent.promise().frame; - self.child->cancel = parent.promise().frame.cancel; + + if constexpr (!Cancel) { + // If not explicitly bound to a cancel source then + // we propagate cancellation parent -> child + self.child->cancel = parent.promise().frame.cancel; + } if constexpr (Cat == category::call) { // Should be the default @@ -403,7 +415,7 @@ struct mixin_frame { template static constexpr auto await_transform_pkg(pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct static_assert(std::is_reference_v && (... && std::is_reference_v)); @@ -433,12 +445,16 @@ struct mixin_frame { child_promise->return_address = nullptr; } + if constexpr (Cancel) { + child_promise->frame.cancel = not_null(pkg.maybe_cancel.ptr); + } + return {.child = &child_promise->frame}; } template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From b3ec30559ee53256fe164de080199330ca9f9b4e Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 12:35:27 +0100 Subject: [PATCH 018/123] PERF: move propagation to await_transform --- src/core/promise.cxx | 39 ++++++++++++++++----------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 03af0008..072ad6ae 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -175,7 +175,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat 
== category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -204,25 +204,8 @@ struct awaitable : std::suspend_always { return parent; } - // Noop if canceled, must clean-up the child that will never be resumed. - if constexpr (Cancel) { - // - if (self.child->is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } - } else { - if (parent.promise().frame.is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } - } - - // Propagate parent->child relationships - self.child->parent = &parent.promise().frame; - - if constexpr (!Cancel) { - // If not explicitly bound to a cancel source then - // we propagate cancellation parent -> child - self.child->cancel = parent.promise().frame.cancel; + if (self.child->is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; } if constexpr (Cat == category::call) { @@ -414,8 +397,9 @@ struct mixin_frame { // --- Await transformations template - static constexpr auto await_transform_pkg(pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + constexpr auto + await_transform_pkg(this auto &self, pkg &&pkg) noexcept( + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct static_assert(std::is_reference_v && (... 
&& std::is_reference_v)); @@ -445,8 +429,17 @@ struct mixin_frame { child_promise->return_address = nullptr; } + // TODO: bench cancel test here instead of await + + // Propagate parent->child relationships + child_promise->frame.parent = &self.frame; + if constexpr (Cancel) { + // Explicit cancel source child_promise->frame.cancel = not_null(pkg.maybe_cancel.ptr); + } else { + // Propagate parent->child relationships + child_promise->frame.cancel = self.frame.cancel; } return {.child = &child_promise->frame}; @@ -454,7 +447,7 @@ struct mixin_frame { template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From 37f99b5625ebf1687954d59f738bcf1789402ba6 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 12:39:46 +0100 Subject: [PATCH 019/123] PERF: move check --- src/core/promise.cxx | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 072ad6ae..8f6b8151 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -204,10 +204,6 @@ struct awaitable : std::suspend_always { return parent; } - if (self.child->is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } - if constexpr (Cat == category::call) { // Should be the default LF_ASSUME(self.child->kind == category::call); @@ -448,6 +444,12 @@ struct mixin_frame { template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept -> awaitable { + + // Don't launch work if cancelled. + if (self.frame.is_cancelled()) [[unlikely]] { + return {.child = nullptr}; + } + LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From 33a599fd6c6f5cfe7be41708befc737b6221d2c0 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 19:31:31 +0100 Subject: [PATCH 020/123] Revert "PERF: move check" This reverts commit 37f99b5625ebf1687954d59f738bcf1789402ba6. 
--- src/core/promise.cxx | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 8f6b8151..072ad6ae 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -204,6 +204,10 @@ struct awaitable : std::suspend_always { return parent; } + if (self.child->is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } + if constexpr (Cat == category::call) { // Should be the default LF_ASSUME(self.child->kind == category::call); @@ -444,12 +448,6 @@ struct mixin_frame { template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept -> awaitable { - - // Don't launch work if cancelled. - if (self.frame.is_cancelled()) [[unlikely]] { - return {.child = nullptr}; - } - LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From 7d5c1de27123691e15e9ef9d655ce239265ac013 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 19:31:31 +0100 Subject: [PATCH 021/123] Revert "PERF: move propagation to await_transform" This reverts commit b3ec30559ee53256fe164de080199330ca9f9b4e. --- src/core/promise.cxx | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 072ad6ae..03af0008 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -175,7 +175,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat == category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -204,8 +204,25 @@ struct awaitable : std::suspend_always { return parent; } - if (self.child->is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; + // Noop if canceled, must clean-up the child that will never be resumed. 
+ if constexpr (Cancel) { + // + if (self.child->is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } + } else { + if (parent.promise().frame.is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } + } + + // Propagate parent->child relationships + self.child->parent = &parent.promise().frame; + + if constexpr (!Cancel) { + // If not explicitly bound to a cancel source then + // we propagate cancellation parent -> child + self.child->cancel = parent.promise().frame.cancel; } if constexpr (Cat == category::call) { @@ -397,9 +414,8 @@ struct mixin_frame { // --- Await transformations template - constexpr auto - await_transform_pkg(this auto &self, pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + static constexpr auto await_transform_pkg(pkg &&pkg) noexcept( + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct static_assert(std::is_reference_v && (... && std::is_reference_v)); @@ -429,17 +445,8 @@ struct mixin_frame { child_promise->return_address = nullptr; } - // TODO: bench cancel test here instead of await - - // Propagate parent->child relationships - child_promise->frame.parent = &self.frame; - if constexpr (Cancel) { - // Explicit cancel source child_promise->frame.cancel = not_null(pkg.maybe_cancel.ptr); - } else { - // Propagate parent->child relationships - child_promise->frame.cancel = self.frame.cancel; } return {.child = &child_promise->frame}; @@ -447,7 +454,7 @@ struct mixin_frame { template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From 1fda32b2bbe1a8bb378501bf0dce189c1161a94a Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 19:10:05 +0100 Subject: [PATCH 022/123] split final suspend tripple final suspend --- src/core/promise.cxx | 143 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 129 
insertions(+), 14 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 03af0008..91d805c9 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -28,9 +28,10 @@ template using frame_t = frame_type>; // =============== Final =============== // + template [[nodiscard]] -LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { +constexpr auto final_suspend3(Context &context, frame_t *frame) noexcept -> coro<> { for (;;) { // Validate final state LF_ASSUME(frame); @@ -57,8 +58,6 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - LF_ASSUME(kind == category::fork); LF_ASSUME(parent->kind != category::root); - Context &context = get_tls_context(); - if (steal_handle last_pushed = context.pop()) { // No-one stole continuation, we are the exclusive owner of parent -> just keep ripping! LF_ASSUME(last_pushed == steal_handle{key(), parent}); @@ -98,7 +97,12 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - // As soon as we do the fetch_sub (if we loose) someone may acquire // the stack so we must prepare it for release now. - auto release_key = context.stack().prepare_release(); + if (owner) { + auto release_key = context.stack().prepare_release(); + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. + context.stack().release(std::move(release_key)); + } // TODO: we could add an `if (owner)` around acquire below, then we could // define that acquire is always called with null or not-self. @@ -110,10 +114,8 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - // parent. As we won the race, acquire all writes before resuming. std::atomic_thread_fence(std::memory_order_acquire); - if (!owner) { - // In case of scenario (2) we must acquire the parent's stack. 
- context.stack().acquire(std::as_const(parent->stack_ckpt)); - } + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); // Must reset parent's control block before resuming parent. parent->reset_counters(); @@ -137,17 +139,130 @@ LF_FORCE_INLINE constexpr auto final_suspend(frame_t *frame) noexcept - // or we are not the last child to complete. We are now out of jobs, we must // yield to the executor. - if (owner) { - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. - context.stack().release(std::move(release_key)); - } - // Else, case (2), our stack has no allocations on it, it may be used later. return std::noop_coroutine(); } } +template +[[nodiscard]] +constexpr auto final_suspend2(Context *, frame_t *frame) noexcept -> coro<>; + +template +[[nodiscard]] +constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { + // Validate final state + LF_ASSUME(frame); + LF_ASSUME(frame->steals == 0); + LF_ASSUME(frame->joins == k_u16_max); + LF_ASSUME(frame->exception_bit == 0); + + // Local copies (before we destroy frame) + category const kind = frame->kind; + + frame_t *parent = not_null(frame->parent); + + // Before resuming the next (or exiting) we should clean-up the current frame. + // Can't use frame from this point onwards + frame->handle().destroy(); + + if (kind == category::call) { + return parent->handle(); + } + + LF_ASSUME(kind == category::fork); + + Context &context = get_tls_context(); + + if (steal_handle last_pushed = context.pop()) { + // No-one stole continuation, we are the exclusive owner of parent -> just keep ripping! + LF_ASSUME(last_pushed == steal_handle{key(), parent}); + // This is not a join point so no state (i.e. counters) is guaranteed. 
+ return parent->handle(); + } + + return final_suspend2(context, parent); +} + +template +[[nodiscard]] +constexpr auto final_suspend2(Context &context, frame_t *parent) noexcept -> coro<> { + // An owner is a worker who: + // + // - Created the task. + // - OR had the task submitted to them. + // - OR won the task at a join. + // + // An owner of a task owns the stack the task is on. + // + // As the worker who completed the child task this thread owns the stack the child task was on. + // + // Either: + // + // 1. The parent is on the same stack as the child. + // 2. OR the parent is on a different stack to the child. + // + // Case (1) implies: we owned the parent; forked the child task; then the parent was then stolen. + // Case (2) implies: we stole the parent task; then forked the child; then the parent was stolen. + // + // Case (2) implies that our stack is empty. + + // As soon as we do the `fetch_sub` below the parent task is no longer safe + // to access as it may be resumed and then destroyed by another thread. Hence + // we must make copies on-the-stack of any data we may need if we lose the + // join race. + bool const owner = parent->stack_ckpt == context.stack().checkpoint(); + + // TODO: we could reduce branching if we unconditionally release and also + // drop pre-release function altogether... Need to benchmark with code that + // triggers a lot of stealing. + + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + if (owner) { + auto release_key = context.stack().prepare_release(); + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. + context.stack().release(std::move(release_key)); + } + + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. 
+ + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); + + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + + // Must reset parent's control block before resuming parent. + parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } + } + return final_suspend3(context, parent); + } + return parent->handle(); + } + + // We did not win the join-race, we cannot dereference the parent pointer now + // as the frame may now be freed by the winner. Parent has not reached join + // or we are not the last child to complete. We are now out of jobs, we must + // yield to the executor. + + // Else, case (2), our stack has no allocations on it, it may be used later. + return std::noop_coroutine(); +} + struct final_awaitable : std::suspend_always { template constexpr static auto await_suspend(coro> handle) noexcept -> coro<> { From 953975bce308e676f5ca973a4c8efd325f2ee83c Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:05:41 +0100 Subject: [PATCH 023/123] Revert "remove pre-suspend check" This reverts commit a2af5da6e60071f1b109b449d361e5c2380d558f. 
--- src/core/promise.cxx | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 91d805c9..930bf711 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -410,6 +410,33 @@ struct join_awaitable { } return true; } + + // TODO: benchmark if including the below check (returning false here) in + // multithreaded case helps performance enough to justify the extra + // instructions along the fast path and complexity + + // Currently: joins() = k_u16_max - num_joined + // Hence: k_u16_max - joins() = num_joined + + // Could use (relaxed here) + (fence(acquire) in truthy branch) but, it's + // better if we see all the decrements to joins() and avoid suspending the + // coroutine if possible. Cannot fetch_sub() here and write to frame as + // coroutine must be suspended first. + + std::uint32_t steals = self.frame->steals; + std::uint32_t joined = k_u16_max - self.frame->atomic_joins().load(std::memory_order_acquire); + + if (steals == joined) { + // We must reset the control block and take the stack. We should never + // own the stack at this point because we must have stolen the stack. + // For ruther explanation see await_suspend() below. + self.take_stack_and_reset(); + + if (self.frame->is_cancelled()) [[unlikely]] { + return false; + } + return true; + } return false; } From f9dac31f78389b5ba8aad46149e6648de49a49c9 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:05:59 +0100 Subject: [PATCH 024/123] Reapply "remove pre-suspend check" This reverts commit 953975bce308e676f5ca973a4c8efd325f2ee83c. 
--- src/core/promise.cxx | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 930bf711..91d805c9 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -410,33 +410,6 @@ struct join_awaitable { } return true; } - - // TODO: benchmark if including the below check (returning false here) in - // multithreaded case helps performance enough to justify the extra - // instructions along the fast path and complexity - - // Currently: joins() = k_u16_max - num_joined - // Hence: k_u16_max - joins() = num_joined - - // Could use (relaxed here) + (fence(acquire) in truthy branch) but, it's - // better if we see all the decrements to joins() and avoid suspending the - // coroutine if possible. Cannot fetch_sub() here and write to frame as - // coroutine must be suspended first. - - std::uint32_t steals = self.frame->steals; - std::uint32_t joined = k_u16_max - self.frame->atomic_joins().load(std::memory_order_acquire); - - if (steals == joined) { - // We must reset the control block and take the stack. We should never - // own the stack at this point because we must have stolen the stack. - // For ruther explanation see await_suspend() below. 
- self.take_stack_and_reset(); - - if (self.frame->is_cancelled()) [[unlikely]] { - return false; - } - return true; - } return false; } From 20d76d4e1bf709cab43aa9ce94984ca722b6c87c Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:20:28 +0100 Subject: [PATCH 025/123] drop inline forcing --- src/core/promise.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 91d805c9..02a3883b 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -309,7 +309,7 @@ struct awaitable : std::suspend_always { } template - LF_FORCE_INLINE constexpr auto + constexpr auto await_suspend(this awaitable self, coro> parent) noexcept -> coro<> { // TODO: Add tests for exception/cancellation handling in fork/call. From d38389b3cc2153ddc8baa932345180a3baf5640d Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:34:07 +0100 Subject: [PATCH 026/123] revert owner split release --- src/core/promise.cxx | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 02a3883b..1b4c3129 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -217,14 +217,7 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce // drop pre-release function altogether... Need to benchmark with code that // triggers a lot of stealing. - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. - if (owner) { - auto release_key = context.stack().prepare_release(); - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. - context.stack().release(std::move(release_key)); - } + auto release_key = context.stack().prepare_release(); // TODO: we could add an `if (owner)` around acquire below, then we could // define that acquire is always called with null or not-self. 
@@ -237,8 +230,9 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce std::atomic_thread_fence(std::memory_order_acquire); // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); - + if (!owner) { + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } // Must reset parent's control block before resuming parent. parent->reset_counters(); @@ -258,6 +252,13 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce // as the frame may now be freed by the winner. Parent has not reached join // or we are not the last child to complete. We are now out of jobs, we must // yield to the executor. + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + if (owner) { + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. + context.stack().release(std::move(release_key)); + } // Else, case (2), our stack has no allocations on it, it may be used later. return std::noop_coroutine(); From c80f7d7231fe5ff52643910355a158656b2dd270 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:39:05 +0100 Subject: [PATCH 027/123] unranched --- src/core/promise.cxx | 90 ++++++++++++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 32 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 1b4c3129..e5617558 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -211,53 +211,79 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce // to access as it may be resumed and then destroyed by another thread. Hence // we must make copies on-the-stack of any data we may need if we lose the // join race. 
- bool const owner = parent->stack_ckpt == context.stack().checkpoint(); // TODO: we could reduce branching if we unconditionally release and also // drop pre-release function altogether... Need to benchmark with code that // triggers a lot of stealing. - auto release_key = context.stack().prepare_release(); + if (parent->stack_ckpt == context.stack().checkpoint()) { - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. + auto release_key = context.stack().prepare_release(); - // Register with parent we have completed this child task. - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. - std::atomic_thread_fence(std::memory_order_acquire); + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. - // In case of scenario (2) we must acquire the parent's stack. - if (!owner) { - context.stack().acquire(std::as_const(parent->stack_ckpt)); - } - // Must reset parent's control block before resuming parent. - parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. 
+ std::atomic_thread_fence(std::memory_order_acquire); + + // Must reset parent's control block before resuming parent. + parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } } + return final_suspend3(context, parent); } - return final_suspend3(context, parent); + return parent->handle(); } - return parent->handle(); - } - // We did not win the join-race, we cannot dereference the parent pointer now - // as the frame may now be freed by the winner. Parent has not reached join - // or we are not the last child to complete. We are now out of jobs, we must - // yield to the executor. - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. - if (owner) { + // We did not win the join-race, we cannot dereference the parent pointer now + // as the frame may now be freed by the winner. Parent has not reached join + // or we are not the last child to complete. We are now out of jobs, we must + // yield to the executor. + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + // We were unable to resume the parent and we were its owner, as the // resuming thread will take ownership of the parent's we must give it up. context.stack().release(std::move(release_key)); + } else { + + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. + + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. 
As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); + + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + + // Must reset parent's control block before resuming parent. + parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } + } + return final_suspend3(context, parent); + } + return parent->handle(); + } } // Else, case (2), our stack has no allocations on it, it may be used later. From 4327a2b70038094533ae4834ae2058253392fbbd Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:39:06 +0100 Subject: [PATCH 028/123] Revert "unranched" This reverts commit c80f7d7231fe5ff52643910355a158656b2dd270. --- src/core/promise.cxx | 90 ++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 58 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index e5617558..1b4c3129 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -211,79 +211,53 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce // to access as it may be resumed and then destroyed by another thread. Hence // we must make copies on-the-stack of any data we may need if we lose the // join race. + bool const owner = parent->stack_ckpt == context.stack().checkpoint(); // TODO: we could reduce branching if we unconditionally release and also // drop pre-release function altogether... Need to benchmark with code that // triggers a lot of stealing. 
- if (parent->stack_ckpt == context.stack().checkpoint()) { + auto release_key = context.stack().prepare_release(); - auto release_key = context.stack().prepare_release(); + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); - // Register with parent we have completed this child task. - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. - std::atomic_thread_fence(std::memory_order_acquire); - - // Must reset parent's control block before resuming parent. - parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); - } + // In case of scenario (2) we must acquire the parent's stack. + if (!owner) { + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } + // Must reset parent's control block before resuming parent. 
+ parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); } - return final_suspend3(context, parent); } - return parent->handle(); + return final_suspend3(context, parent); } + return parent->handle(); + } - // We did not win the join-race, we cannot dereference the parent pointer now - // as the frame may now be freed by the winner. Parent has not reached join - // or we are not the last child to complete. We are now out of jobs, we must - // yield to the executor. - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. - + // We did not win the join-race, we cannot dereference the parent pointer now + // as the frame may now be freed by the winner. Parent has not reached join + // or we are not the last child to complete. We are now out of jobs, we must + // yield to the executor. + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + if (owner) { // We were unable to resume the parent and we were its owner, as the // resuming thread will take ownership of the parent's we must give it up. context.stack().release(std::move(release_key)); - } else { - - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. - - // Register with parent we have completed this child task. - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. 
- std::atomic_thread_fence(std::memory_order_acquire); - - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); - - // Must reset parent's control block before resuming parent. - parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); - } - } - return final_suspend3(context, parent); - } - return parent->handle(); - } } // Else, case (2), our stack has no allocations on it, it may be used later. From 13b3f6322f5369d2818baaf8f9077551fa35744b Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 20:44:31 +0100 Subject: [PATCH 029/123] unique types in pkg --- src/core/ops.cxx | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 8328fd95..e74392b4 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -11,20 +11,21 @@ import :frame; namespace lf { -template +// Integer is just to make the types different +template struct maybe_ptr { T *ptr; }; -template <> -struct maybe_ptr {}; +template +struct maybe_ptr {}; // clang-format off template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] maybe_ptr> maybe_cancel; - [[no_unique_address]] maybe_ptr maybe_ret_adr; + [[no_unique_address]] maybe_ptr<0, std::conditional> maybe_cancel; + [[no_unique_address]] maybe_ptr<1, R> maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; }; @@ -69,6 +70,8 @@ struct scope { using cancel_t = cancellation *; + // TODO: a test that instantiates all of these + public: // === Fork no-cancel === // From a5ca40647f6ac11522a9d8f156e8ed7aac5a06e6 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 15:59:18 +0100 Subject: [PATCH 030/123] comments --- src/core/promise.cxx | 17 +++++++++++++++-- 1 file
changed, 15 insertions(+), 2 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 1b4c3129..994ce65d 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -29,6 +29,21 @@ using frame_t = frame_type>; // =============== Final =============== // +/** + * @brief The full final suspend logic. + * + * The final suspend logic is fully expressed in this function in brief: + * + * - Try to resume parent if a call. + * - Try to resume parent if a fork with no stealing. + * - Try to resume a stolen forked task if last to complete. + * + * This function also handles cancellation (of the parent) by iteratively + * climbing up the parent chain. + * + * This function is split and repeated as two separate functions to allow the + * hot-path code to be inlined more easily into the final suspend. + */ template [[nodiscard]] constexpr auto final_suspend3(Context &context, frame_t *frame) noexcept -> coro<> { @@ -273,8 +288,6 @@ struct final_awaitable : std::suspend_always { // =============== Fork/Call =============== // -// TODO: make sure exceptions are cancel-safe (I think now cancellation can leak) - /** * @brief Call inside a catch block, stash current exception in `frame`. 
*/ From 97bbd711e4cad6b9d9339ee8ef0e84ef3171eac2 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:03:19 +0100 Subject: [PATCH 031/123] better names --- src/core/promise.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 994ce65d..3e3d537f 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -46,7 +46,7 @@ using frame_t = frame_type>; */ template [[nodiscard]] -constexpr auto final_suspend3(Context &context, frame_t *frame) noexcept -> coro<> { +constexpr auto final_suspend_full(Context &context, frame_t *frame) noexcept -> coro<> { for (;;) { // Validate final state LF_ASSUME(frame); @@ -258,7 +258,7 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce std::ignore = extract_exception(parent); } } - return final_suspend3(context, parent); + return final_suspend_full(context, parent); } return parent->handle(); } From f53b1a05f8de7d6df068efbe692b9109a5c71c58 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:06:40 +0100 Subject: [PATCH 032/123] rm comments --- src/core/promise.cxx | 57 ++++---------------------------------------- 1 file changed, 4 insertions(+), 53 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 3e3d537f..c45e526a 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -166,19 +166,16 @@ constexpr auto final_suspend2(Context *, frame_t *frame) noexcept -> co template [[nodiscard]] constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { - // Validate final state + LF_ASSUME(frame); LF_ASSUME(frame->steals == 0); LF_ASSUME(frame->joins == k_u16_max); LF_ASSUME(frame->exception_bit == 0); - // Local copies (before we destroy frame) category const kind = frame->kind; frame_t *parent = not_null(frame->parent); - // Before resuming the next (or exiting) we should clean-up the current frame. 
- // Can't use frame from this point onwards frame->handle().destroy(); if (kind == category::call) { @@ -190,9 +187,7 @@ constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { Context &context = get_tls_context(); if (steal_handle last_pushed = context.pop()) { - // No-one stole continuation, we are the exclusive owner of parent -> just keep ripping! LF_ASSUME(last_pushed == steal_handle{key(), parent}); - // This is not a join point so no state (i.e. counters) is guaranteed. return parent->handle(); } @@ -202,57 +197,22 @@ constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { template [[nodiscard]] constexpr auto final_suspend2(Context &context, frame_t *parent) noexcept -> coro<> { - // An owner is a worker who: - // - // - Created the task. - // - OR had the task submitted to them. - // - OR won the task at a join. - // - // An owner of a task owns the stack the task is on. - // - // As the worker who completed the child task this thread owns the stack the child task was on. - // - // Either: - // - // 1. The parent is on the same stack as the child. - // 2. OR the parent is on a different stack to the child. - // - // Case (1) implies: we owned the parent; forked the child task; then the parent was then stolen. - // Case (2) implies: we stole the parent task; then forked the child; then the parent was stolen. - // - // Case (2) implies that our stack is empty. - - // As soon as we do the `fetch_sub` below the parent task is no longer safe - // to access as it may be resumed and then destroyed by another thread. Hence - // we must make copies on-the-stack of any data we may need if we lose the - // join race. - bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - // TODO: we could reduce branching if we unconditionally release and also - // drop pre-release function altogether... Need to benchmark with code that - // triggers a lot of stealing. 
+ bool const owner = parent->stack_ckpt == context.stack().checkpoint(); auto release_key = context.stack().prepare_release(); - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. - - // Register with parent we have completed this child task. if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); - // In case of scenario (2) we must acquire the parent's stack. if (!owner) { context.stack().acquire(std::as_const(parent->stack_ckpt)); } - // Must reset parent's control block before resuming parent. + parent->reset_counters(); if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled if constexpr (LF_COMPILER_EXCEPTIONS) { if (parent->exception_bit) [[unlikely]] { std::ignore = extract_exception(parent); @@ -263,19 +223,10 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce return parent->handle(); } - // We did not win the join-race, we cannot dereference the parent pointer now - // as the frame may now be freed by the winner. Parent has not reached join - // or we are not the last child to complete. We are now out of jobs, we must - // yield to the executor. - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. if (owner) { - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. context.stack().release(std::move(release_key)); } - // Else, case (2), our stack has no allocations on it, it may be used later. 
return std::noop_coroutine(); } From fe90470dbf6ab382dbca7739c9409a918e82788b Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:08:01 +0100 Subject: [PATCH 033/123] full pre-release --- src/core/promise.cxx | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index c45e526a..51d85cb4 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -112,12 +112,7 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // As soon as we do the fetch_sub (if we loose) someone may acquire // the stack so we must prepare it for release now. - if (owner) { - auto release_key = context.stack().prepare_release(); - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. - context.stack().release(std::move(release_key)); - } + auto release_key = context.stack().prepare_release(); // TODO: we could add an `if (owner)` around acquire below, then we could // define that acquire is always called with null or not-self. @@ -129,8 +124,10 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // parent. As we won the race, acquire all writes before resuming. std::atomic_thread_fence(std::memory_order_acquire); - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); + if (owner) { + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } // Must reset parent's control block before resuming parent. parent->reset_counters(); @@ -146,6 +143,12 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe continue; } + if (owner) { + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. 
+ context.stack().release(std::move(release_key)); + } + return parent->handle(); } From 764b6d87f3978269cdbb9746b4c4dc7c9b19b191 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:09:11 +0100 Subject: [PATCH 034/123] full fused release This reverts commit fe90470dbf6ab382dbca7739c9409a918e82788b. --- src/core/promise.cxx | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 51d85cb4..996d4a33 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -112,7 +112,12 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // As soon as we do the fetch_sub (if we loose) someone may acquire // the stack so we must prepare it for release now. - auto release_key = context.stack().prepare_release(); + if (owner) { + auto release_key = context.stack().prepare_release(); + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. + context.stack().release(std::move(release_key)); + } // TODO: we could add an `if (owner)` around acquire below, then we could // define that acquire is always called with null or not-self. @@ -124,10 +129,8 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // parent. As we won the race, acquire all writes before resuming. std::atomic_thread_fence(std::memory_order_acquire); - if (owner) { - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); - } + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); // Must reset parent's control block before resuming parent. 
parent->reset_counters(); @@ -143,12 +146,6 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe continue; } - if (owner) { - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. - context.stack().release(std::move(release_key)); - } - return parent->handle(); } @@ -203,15 +200,17 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - auto release_key = context.stack().prepare_release(); + if (owner) { + + auto release_key = context.stack().prepare_release(); + context.stack().release(std::move(release_key)); + } if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { std::atomic_thread_fence(std::memory_order_acquire); - if (!owner) { - context.stack().acquire(std::as_const(parent->stack_ckpt)); - } + context.stack().acquire(std::as_const(parent->stack_ckpt)); parent->reset_counters(); @@ -226,10 +225,6 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce return parent->handle(); } - if (owner) { - context.stack().release(std::move(release_key)); - } - return std::noop_coroutine(); } From 767eb38e864a4e44f67e6646f49c712d9104e451 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:25:28 +0100 Subject: [PATCH 035/123] Revert "full fused release" This reverts commit 764b6d87f3978269cdbb9746b4c4dc7c9b19b191. --- src/core/promise.cxx | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 996d4a33..51d85cb4 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -112,12 +112,7 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // As soon as we do the fetch_sub (if we loose) someone may acquire // the stack so we must prepare it for release now. 
- if (owner) { - auto release_key = context.stack().prepare_release(); - // We were unable to resume the parent and we were its owner, as the - // resuming thread will take ownership of the parent's we must give it up. - context.stack().release(std::move(release_key)); - } + auto release_key = context.stack().prepare_release(); // TODO: we could add an `if (owner)` around acquire below, then we could // define that acquire is always called with null or not-self. @@ -129,8 +124,10 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // parent. As we won the race, acquire all writes before resuming. std::atomic_thread_fence(std::memory_order_acquire); - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); + if (owner) { + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } // Must reset parent's control block before resuming parent. parent->reset_counters(); @@ -146,6 +143,12 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe continue; } + if (owner) { + // We were unable to resume the parent and we were its owner, as the + // resuming thread will take ownership of the parent's we must give it up. 
+ context.stack().release(std::move(release_key)); + } + return parent->handle(); } @@ -200,17 +203,15 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - if (owner) { - - auto release_key = context.stack().prepare_release(); - context.stack().release(std::move(release_key)); - } + auto release_key = context.stack().prepare_release(); if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { std::atomic_thread_fence(std::memory_order_acquire); - context.stack().acquire(std::as_const(parent->stack_ckpt)); + if (!owner) { + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } parent->reset_counters(); @@ -225,6 +226,10 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce return parent->handle(); } + if (owner) { + context.stack().release(std::move(release_key)); + } + return std::noop_coroutine(); } From 12bb9c5d3f33cf6f57c78c7180c04c412f8dbb3b Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:31:54 +0100 Subject: [PATCH 036/123] double branch reduced --- src/core/promise.cxx | 136 +++++++++++++++++++++++++++---------------- 1 file changed, 87 insertions(+), 49 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 51d85cb4..8f0456f0 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -110,46 +110,67 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // drop pre-release function altogether... Need to benchmark with code that // triggers a lot of stealing. - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. - auto release_key = context.stack().prepare_release(); - - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. - - // Register with parent we have completed this child task. 
- if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. - std::atomic_thread_fence(std::memory_order_acquire); - - if (owner) { - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); - } - - // Must reset parent's control block before resuming parent. - parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); + if (owner) { + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + auto release_key = context.stack().prepare_release(); + + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. + + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); + + // Must reset parent's control block before resuming parent. 
+ parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } } + frame = parent; + continue; } - frame = parent; - continue; - } - if (owner) { // We were unable to resume the parent and we were its owner, as the // resuming thread will take ownership of the parent's we must give it up. context.stack().release(std::move(release_key)); - } - return parent->handle(); + } else { + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); + + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + + // Must reset parent's control block before resuming parent. 
+ parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } + } + frame = parent; + continue; + } + } + + return parent->handle(); + } } // We did not win the join-race, we cannot dereference the parent pointer now @@ -203,31 +224,48 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - auto release_key = context.stack().prepare_release(); - - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + if (owner) { + auto release_key = context.stack().prepare_release(); - std::atomic_thread_fence(std::memory_order_acquire); + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - if (!owner) { - context.stack().acquire(std::as_const(parent->stack_ckpt)); - } + std::atomic_thread_fence(std::memory_order_acquire); - parent->reset_counters(); + parent->reset_counters(); - if (parent->is_cancelled()) [[unlikely]] { - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); + if (parent->is_cancelled()) [[unlikely]] { + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } } + return final_suspend_full(context, parent); } - return final_suspend_full(context, parent); + return parent->handle(); } - return parent->handle(); - } - if (owner) { context.stack().release(std::move(release_key)); + + } else { + + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + + std::atomic_thread_fence(std::memory_order_acquire); + + context.stack().acquire(std::as_const(parent->stack_ckpt)); + + parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + if constexpr (LF_COMPILER_EXCEPTIONS) { + if 
(parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); + } + } + return final_suspend_full(context, parent); + } + return parent->handle(); + } } return std::noop_coroutine(); From da7054b10fd99fdbe9c96bfb0481099014f10237 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:33:06 +0100 Subject: [PATCH 037/123] Revert "double branch reduced" This reverts commit 12bb9c5d3f33cf6f57c78c7180c04c412f8dbb3b. --- src/core/promise.cxx | 136 ++++++++++++++++--------------------------- 1 file changed, 49 insertions(+), 87 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 8f0456f0..51d85cb4 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -110,67 +110,46 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // drop pre-release function altogether... Need to benchmark with code that // triggers a lot of stealing. - if (owner) { - // As soon as we do the fetch_sub (if we loose) someone may acquire - // the stack so we must prepare it for release now. - auto release_key = context.stack().prepare_release(); - - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. - - // Register with parent we have completed this child task. - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. - std::atomic_thread_fence(std::memory_order_acquire); - - // Must reset parent's control block before resuming parent. 
- parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); - } + // As soon as we do the fetch_sub (if we loose) someone may acquire + // the stack so we must prepare it for release now. + auto release_key = context.stack().prepare_release(); + + // TODO: we could add an `if (owner)` around acquire below, then we could + // define that acquire is always called with null or not-self. + + // Register with parent we have completed this child task. + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + // Parent has reached join and we are the last child task to complete. We + // are the exclusive owner of the parent and therefore, we must continue + // parent. As we won the race, acquire all writes before resuming. + std::atomic_thread_fence(std::memory_order_acquire); + + if (owner) { + // In case of scenario (2) we must acquire the parent's stack. + context.stack().acquire(std::as_const(parent->stack_ckpt)); + } + + // Must reset parent's control block before resuming parent. + parent->reset_counters(); + + if (parent->is_cancelled()) [[unlikely]] { + // Don't resume if cancelled + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); } - frame = parent; - continue; } + frame = parent; + continue; + } + if (owner) { // We were unable to resume the parent and we were its owner, as the // resuming thread will take ownership of the parent's we must give it up. context.stack().release(std::move(release_key)); - - } else { - // Register with parent we have completed this child task. - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - // Parent has reached join and we are the last child task to complete. 
We - // are the exclusive owner of the parent and therefore, we must continue - // parent. As we won the race, acquire all writes before resuming. - std::atomic_thread_fence(std::memory_order_acquire); - - // In case of scenario (2) we must acquire the parent's stack. - context.stack().acquire(std::as_const(parent->stack_ckpt)); - - // Must reset parent's control block before resuming parent. - parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - // Don't resume if cancelled - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); - } - } - frame = parent; - continue; - } - } - - return parent->handle(); } + + return parent->handle(); } // We did not win the join-race, we cannot dereference the parent pointer now @@ -224,48 +203,31 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - if (owner) { - auto release_key = context.stack().prepare_release(); - - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { + auto release_key = context.stack().prepare_release(); - std::atomic_thread_fence(std::memory_order_acquire); + if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - parent->reset_counters(); - - if (parent->is_cancelled()) [[unlikely]] { - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); - } - } - return final_suspend_full(context, parent); - } - return parent->handle(); - } - - context.stack().release(std::move(release_key)); - - } else { - - if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { - - std::atomic_thread_fence(std::memory_order_acquire); + std::atomic_thread_fence(std::memory_order_acquire); + if (!owner) { context.stack().acquire(std::as_const(parent->stack_ckpt)); + } - parent->reset_counters(); + 
parent->reset_counters(); - if (parent->is_cancelled()) [[unlikely]] { - if constexpr (LF_COMPILER_EXCEPTIONS) { - if (parent->exception_bit) [[unlikely]] { - std::ignore = extract_exception(parent); - } + if (parent->is_cancelled()) [[unlikely]] { + if constexpr (LF_COMPILER_EXCEPTIONS) { + if (parent->exception_bit) [[unlikely]] { + std::ignore = extract_exception(parent); } - return final_suspend_full(context, parent); } - return parent->handle(); + return final_suspend_full(context, parent); } + return parent->handle(); + } + + if (owner) { + context.stack().release(std::move(release_key)); } return std::noop_coroutine(); From 9fc5baadef187e9de06c23308d375e685de04e38 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:35:08 +0100 Subject: [PATCH 038/123] fixup negation --- src/core/promise.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 51d85cb4..179d5e57 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -124,7 +124,7 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // parent. As we won the race, acquire all writes before resuming. std::atomic_thread_fence(std::memory_order_acquire); - if (owner) { + if (!owner) { // In case of scenario (2) we must acquire the parent's stack. context.stack().acquire(std::as_const(parent->stack_ckpt)); } From 3e21509db1128238417cac2aae16b8c3bc3918d6 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:35:56 +0100 Subject: [PATCH 039/123] fixup todo --- src/core/promise.cxx | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 179d5e57..ebce49bf 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -114,9 +114,6 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // the stack so we must prepare it for release now. 
auto release_key = context.stack().prepare_release(); - // TODO: we could add an `if (owner)` around acquire below, then we could - // define that acquire is always called with null or not-self. - // Register with parent we have completed this child task. if (parent->atomic_joins().fetch_sub(1, std::memory_order_release) == 1) { // Parent has reached join and we are the last child task to complete. We From 2817984c1a3c6ed7c13bb7d7155615f74676ef99 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 16:53:30 +0100 Subject: [PATCH 040/123] refactor names of final suspend parts --- src/core/promise.cxx | 75 +++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 42 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index ebce49bf..07aeb222 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -106,10 +106,6 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // join race. bool const owner = parent->stack_ckpt == context.stack().checkpoint(); - // TODO: we could reduce branching if we unconditionally release and also - // drop pre-release function altogether... Need to benchmark with code that - // triggers a lot of stealing. - // As soon as we do the fetch_sub (if we lose) someone may acquire // the stack so we must prepare it for release now.
auto release_key = context.stack().prepare_release(); @@ -161,42 +157,7 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe template [[nodiscard]] -constexpr auto final_suspend2(Context *, frame_t *frame) noexcept -> coro<>; - -template -[[nodiscard]] -constexpr auto final_suspend(frame_t *frame) noexcept -> coro<> { - - LF_ASSUME(frame); - LF_ASSUME(frame->steals == 0); - LF_ASSUME(frame->joins == k_u16_max); - LF_ASSUME(frame->exception_bit == 0); - - category const kind = frame->kind; - - frame_t *parent = not_null(frame->parent); - - frame->handle().destroy(); - - if (kind == category::call) { - return parent->handle(); - } - - LF_ASSUME(kind == category::fork); - - Context &context = get_tls_context(); - - if (steal_handle last_pushed = context.pop()) { - LF_ASSUME(last_pushed == steal_handle{key(), parent}); - return parent->handle(); - } - - return final_suspend2(context, parent); -} - -template -[[nodiscard]] -constexpr auto final_suspend2(Context &context, frame_t *parent) noexcept -> coro<> { +constexpr auto final_suspend_trailing(Context &context, frame_t *parent) noexcept -> coro<> { bool const owner = parent->stack_ckpt == context.stack().checkpoint(); @@ -229,11 +190,41 @@ constexpr auto final_suspend2(Context &context, frame_t *parent) noexce return std::noop_coroutine(); } +template +[[nodiscard]] +constexpr auto final_suspend_leading(frame_t *frame) noexcept -> coro<> { + + LF_ASSUME(frame); + LF_ASSUME(frame->steals == 0); + LF_ASSUME(frame->joins == k_u16_max); + LF_ASSUME(frame->exception_bit == 0); + + category const kind = frame->kind; + + frame_t *parent = not_null(frame->parent); + + frame->handle().destroy(); + + if (kind == category::call) { + return parent->handle(); + } + + LF_ASSUME(kind == category::fork); + + Context &context = get_tls_context(); + + if (steal_handle last_pushed = context.pop()) { + LF_ASSUME(last_pushed == steal_handle{key(), parent}); + return parent->handle(); + } + + return 
final_suspend_trailing(context, parent); +} struct final_awaitable : std::suspend_always { template constexpr static auto await_suspend(coro> handle) noexcept -> coro<> { - return final_suspend(&handle.promise().frame); + return final_suspend_leading(&handle.promise().frame); } }; @@ -440,7 +431,7 @@ struct join_awaitable { std::ignore = extract_exception(self.frame); } } - return final_suspend(self.frame); + return final_suspend_leading(self.frame); } [[noreturn]] From acc9a7a4f44f7c3a3180e68b73136296db0adafd Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:11:42 +0100 Subject: [PATCH 041/123] set cancellation in await suspend --- src/core/promise.cxx | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 07aeb222..05fe5116 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -290,12 +290,6 @@ struct awaitable : std::suspend_always { // Propagate parent->child relationships self.child->parent = &parent.promise().frame; - if constexpr (!Cancel) { - // If not explicitly bound to a cancel source then - // we propagate cancellation parent -> child - self.child->cancel = parent.promise().frame.cancel; - } - if constexpr (Cat == category::call) { // Should be the default LF_ASSUME(self.child->kind == category::call); @@ -485,7 +479,8 @@ struct mixin_frame { // --- Await transformations template - static constexpr auto await_transform_pkg(pkg &&pkg) noexcept( + constexpr auto + await_transform_pkg(this auto &self, pkg &&pkg) noexcept( async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct @@ -518,6 +513,8 @@ struct mixin_frame { if constexpr (Cancel) { child_promise->frame.cancel = not_null(pkg.maybe_cancel.ptr); + } else { + child_promise->frame.cancel = self.frame.cancel; } return {.child = &child_promise->frame}; From 20141eea3922a749206f284eea0adbdb5bc6c47b Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:15:00 +0100 
Subject: [PATCH 042/123] unconditional cancel test --- src/core/promise.cxx | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 05fe5116..c904ae63 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -276,15 +276,8 @@ struct awaitable : std::suspend_always { } // Noop if canceled, must clean-up the child that will never be resumed. - if constexpr (Cancel) { - // - if (self.child->is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } - } else { - if (parent.promise().frame.is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } + if (self.child->is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; } // Propagate parent->child relationships From fcc1924d356d367343389877ccc361a813541305 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:19:07 +0100 Subject: [PATCH 043/123] bad --- src/core/promise.cxx | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index c904ae63..582b04f1 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -246,7 +246,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat == category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -280,16 +280,6 @@ struct awaitable : std::suspend_always { return self.child->handle().destroy(), parent; } - // Propagate parent->child relationships - self.child->parent = &parent.promise().frame; - - if constexpr (Cat == category::call) { - // Should be the default - LF_ASSUME(self.child->kind == category::call); - } else { - self.child->kind = Cat; - } - if constexpr (Cat == category::fork) { // It is critical to pass self by-value here, after the call to push() // the object `*this` may be destroyed, if passing by 
ref it would be @@ -474,7 +464,7 @@ struct mixin_frame { template constexpr auto await_transform_pkg(this auto &self, pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct static_assert(std::is_reference_v && (... && std::is_reference_v)); @@ -510,12 +500,21 @@ struct mixin_frame { child_promise->frame.cancel = self.frame.cancel; } + // Propagate parent->child relationships + child_promise->frame.parent = &self.frame; + + if constexpr (Cat == category::call) { + // Should be the default + } else { + child_promise->frame.kind = Cat; + } + return {.child = &child_promise->frame}; } template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From 2054432ba975e4baaea2db525b68fb011369050c Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:19:09 +0100 Subject: [PATCH 044/123] Revert "bad" This reverts commit fcc1924d356d367343389877ccc361a813541305. 
--- src/core/promise.cxx | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 582b04f1..c904ae63 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -246,7 +246,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat == category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -280,6 +280,16 @@ struct awaitable : std::suspend_always { return self.child->handle().destroy(), parent; } + // Propagate parent->child relationships + self.child->parent = &parent.promise().frame; + + if constexpr (Cat == category::call) { + // Should be the default + LF_ASSUME(self.child->kind == category::call); + } else { + self.child->kind = Cat; + } + if constexpr (Cat == category::fork) { // It is critical to pass self by-value here, after the call to push() // the object `*this` may be destroyed, if passing by ref it would be @@ -464,7 +474,7 @@ struct mixin_frame { template constexpr auto await_transform_pkg(this auto &self, pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct static_assert(std::is_reference_v && (... 
&& std::is_reference_v)); @@ -500,21 +510,12 @@ struct mixin_frame { child_promise->frame.cancel = self.frame.cancel; } - // Propagate parent->child relationships - child_promise->frame.parent = &self.frame; - - if constexpr (Cat == category::call) { - // Should be the default - } else { - child_promise->frame.kind = Cat; - } - return {.child = &child_promise->frame}; } template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From 2e56de7d07b16c228346fc1b47e75c97014cb44f Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:22:02 +0100 Subject: [PATCH 045/123] Revert "unconditional cancel test" This reverts commit 20141eea3922a749206f284eea0adbdb5bc6c47b. --- src/core/promise.cxx | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index c904ae63..05fe5116 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -276,8 +276,15 @@ struct awaitable : std::suspend_always { } // Noop if canceled, must clean-up the child that will never be resumed. 
- if (self.child->is_cancelled()) [[unlikely]] { - return self.child->handle().destroy(), parent; + if constexpr (Cancel) { + // + if (self.child->is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } + } else { + if (parent.promise().frame.is_cancelled()) [[unlikely]] { + return self.child->handle().destroy(), parent; + } } // Propagate parent->child relationships From a5e8f28d319e0e4a338d193f03c2c2b6c8da2781 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:27:32 +0100 Subject: [PATCH 046/123] rm comment --- src/core/promise.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 05fe5116..17a53b05 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -277,7 +277,6 @@ struct awaitable : std::suspend_always { // Noop if canceled, must clean-up the child that will never be resumed. if constexpr (Cancel) { - // if (self.child->is_cancelled()) [[unlikely]] { return self.child->handle().destroy(), parent; } From 6183c6eec7caa8db8707d83f94958d489a509764 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:34:49 +0100 Subject: [PATCH 047/123] constify --- src/core/promise.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 17a53b05..c2130b4c 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -479,7 +479,7 @@ struct mixin_frame { template constexpr auto - await_transform_pkg(this auto &self, pkg &&pkg) noexcept( + await_transform_pkg(this auto const &self, pkg &&pkg) noexcept( async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct From 774ed440500fc8af2df170284935d040735626ea Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:43:32 +0100 Subject: [PATCH 048/123] add todo --- src/core/promise.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index c2130b4c..9b03942d 100644 --- 
a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -270,6 +270,8 @@ struct awaitable : std::suspend_always { // TODO: Add tests for exception/cancellation handling in fork/call. + // TODO: test of having a dedicated is_cancelld awaitable is quicker + if (!self.child) [[unlikely]] { // Noop if an exception was thrown. return parent; From 20c26f02d40edbce5007510113db352d735cf824 Mon Sep 17 00:00:00 2001 From: Conor Date: Wed, 15 Apr 2026 17:44:42 +0100 Subject: [PATCH 049/123] add todo --- src/core/promise.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 9b03942d..e911d6d3 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -502,7 +502,7 @@ struct mixin_frame { // void can signal drop return. static_assert(std::same_as || std::is_void_v); - // TODO: tests for null path + // TODO: tests for null return path if constexpr (!std::is_void_v) { child_promise->return_address = not_null(pkg.maybe_ret_adr.ptr); From 30db539cfece1e9a0506dff40268f687ff64b288 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 10 Apr 2026 13:02:00 +0100 Subject: [PATCH 050/123] cancel test TMP: cancel branch tests format add joins fuzz more fuzz format tigher structure for agents drop all ref to lints fix conditional --- src/core/frame.cxx | 9 +- test/src/cancel.cpp | 410 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 418 insertions(+), 1 deletion(-) create mode 100644 test/src/cancel.cpp diff --git a/src/core/frame.cxx b/src/core/frame.cxx index ddff6627..c4ff3714 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -10,9 +10,16 @@ import libfork.utils; namespace lf { // =================== Cancellation =================== // -struct cancellation { +export struct cancellation { cancellation *parent = nullptr; std::atomic stop = 0; + + constexpr void request_stop() noexcept { stop.store(1, std::memory_order_release); } + + [[nodiscard]] + constexpr auto stop_requested() const noexcept -> bool { 
+ return stop.load(std::memory_order_acquire) != 0; + } }; // =================== Frame =================== // diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp new file mode 100644 index 00000000..f52e03a6 --- /dev/null +++ b/test/src/cancel.cpp @@ -0,0 +1,410 @@ +#include +#include + +#include "libfork/__impl/exception.hpp" + +import std; + +import libfork; + +// ============================================================ +// Helpers +// ============================================================ + +namespace { + +using lf::cancellation; +using lf::env; +using lf::task; + +// ---- context aliases ---- + +using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; +using poly_inline_ctx = lf::derived_poly_context, lf::adapt_vector>; +using mono_busy_pool = lf::mono_busy_pool>; +using poly_busy_pool = lf::poly_busy_pool>; + +// ---- leaf tasks ---- + +template +auto noop_task(env) -> task { + co_return; +} + +template +auto counting_task(env, std::atomic *counter) -> task { + counter->fetch_add(1, std::memory_order_relaxed); + co_return; +} + +template +auto returning_task(env) -> task { + co_return 99; +} + +// Signals cancellation, then returns. +template +auto signal_cancel_task(env, cancellation *tok) -> task { + tok->request_stop(); + co_return; +} + +// ============================================================ +// Tasks for "pre-cancel: child task runs, grandchild is skipped" +// +// outer_with_cancel [cancel=nullptr] +// → scope::call(&tok, inner_with_cancel) +// inner_with_cancel [cancel=&tok] +// → scope::call(counting_task) ← await_transform checks inner's cancel +// → skipped when tok is stopped +// ============================================================ + +template +auto inner_with_cancel(env, std::atomic *grandchild_ran) -> task { + using S = lf::scope; + // Inherited cancel is checked here. If our cancel chain is stopped, + // counting_task is never created. 
+ co_await S::call(counting_task, grandchild_ran); + co_await lf::join(); + co_return; +} + +template +auto outer_with_cancel(env, cancellation *tok, std::atomic *grandchild_ran) + -> task { + using S = lf::scope; + // Root's cancel=nullptr → await_transform succeeds, creates inner_with_cancel + // and binds it to tok. + co_await S::call(tok, inner_with_cancel, grandchild_ran); + co_await lf::join(); + co_return; +} + +// ============================================================ +// Tasks for "fork: child signals cancel, post-join code unreachable" +// +// fork_outer [cancel=nullptr] +// → scope::call(&tok, fork_signal_join) +// fork_signal_join [cancel=&tok] +// → scope::fork(signal_cancel_task) [cancel inherited=&tok] +// → co_await join() +// join: steals=0, is_cancelled()=true +// await_ready → false +// await_suspend: steals=0 special-case (bug-fix path) +// → handle_cancel → final_suspend cascade +// [post-join code unreachable] +// ============================================================ + +template +auto fork_signal_join(env, cancellation *tok, std::atomic *post_join_ran) + -> task { + using S = lf::scope; + + // Fork a child that sets tok.stop = 1. tok is not yet stopped, so the + // child is created (parent's await_transform sees is_cancelled()=false). + co_await S::fork(signal_cancel_task, tok); + + // Join: with the inline scheduler there are no steals (steals==0). + // tok is now stopped, so is_cancelled() returns true. + // await_ready returns false → await_suspend is called. + // The steals==0 special-case in await_suspend handles this correctly. + co_await lf::join(); + + // Should NOT be reached — cancellation cascades before here. 
+ post_join_ran->fetch_add(1, std::memory_order_relaxed); + co_return; +} + +template +auto fork_outer(env, cancellation *tok, std::atomic *post_join_ran) -> task { + using S = lf::scope; + co_await S::call(tok, fork_signal_join, tok, post_join_ran); + co_await lf::join(); + co_return; +} + +// ============================================================ +// Tasks for "second fork skipped when token already stopped" +// +// fork_two_outer [cancel=nullptr] +// → scope::call(&tok, fork_two_children) +// fork_two_children [cancel=&tok] +// → fork(signal_cancel_task) ← sets tok, completes inline +// → fork(counting_task) ← await_transform: is_cancelled()=true → skipped +// → join() +// ============================================================ + +template +auto fork_two_children(env, cancellation *tok, std::atomic *second_ran) -> task { + using S = lf::scope; + + co_await S::fork(signal_cancel_task, tok); + // After inline child completes tok is stopped. + // This task's cancel = tok → next await_transform checks is_cancelled() → true → skipped. + co_await S::fork(counting_task, second_ran); + co_await lf::join(); + co_return; +} + +template +auto fork_two_outer(env, cancellation *tok, std::atomic *second_ran) -> task { + using S = lf::scope; + co_await S::call(tok, fork_two_children, tok, second_ran); + co_await lf::join(); + co_return; +} + +// ============================================================ +// Tasks for "return value is default-initialised when cancelled" +// +// The inner task cancels via a fork, then cascades before co_return 99. +// The outer task writes the (unset) return value to val, which stays 0. 
+// ============================================================ + +template +auto inner_returning(env, cancellation *tok) -> task { + using S = lf::scope; + co_await S::fork(signal_cancel_task, tok); + co_await lf::join(); // cascade happens here + co_return 99; // unreachable +} + +template +auto outer_returning(env, cancellation *tok) -> task { + using S = lf::scope; + int val = 0; + // Call inner with tok in its cancel chain; write result to val. + co_await S::call(tok, &val, inner_returning, tok); + co_await lf::join(); + co_return val; +} + +#if LF_COMPILER_EXCEPTIONS + +// ============================================================ +// Tasks for "exception in forked child that also signals cancel" +// +// The child throws AND signals cancellation. Because the parent frame is +// cancelled, handle_cancel() discards the stashed exception (std::ignore = +// extract_exception). The receiver must complete without throwing. +// ============================================================ + +template +auto throw_and_cancel(env, cancellation *tok) -> task { + tok->request_stop(); + LF_THROW(std::runtime_error{"intentional"}); + co_return; +} + +template +auto fork_throw_and_cancel(env, cancellation *tok) -> task { + using S = lf::scope; + co_await S::fork(throw_and_cancel, tok); + co_await lf::join(); + co_return; +} + +template +auto fork_throw_outer(env, cancellation *tok) -> task { + using S = lf::scope; + co_await S::call(tok, fork_throw_and_cancel, tok); + co_await lf::join(); + co_return; +} + +#endif // LF_COMPILER_EXCEPTIONS + +// ============================================================ +// Task for "call-based cancel: pre-stopped token, child skips its own work" +// ============================================================ + +template +auto call_pre_cancel_root(env, cancellation *tok, std::atomic *ran) -> task { + using S = lf::scope; + // tok is already stopped before this call. 
+ // Root's cancel=nullptr → await_transform creates inner_with_cancel. + // inner_with_cancel inherits tok → its own await_transform skips counting_task. + co_await S::call(tok, inner_with_cancel, ran); + co_await lf::join(); + co_return; +} + +// ============================================================ +// Generic test runner +// ============================================================ + +template +void run_cancel_tests(Sch &scheduler) { + + using Ctx = lf::context_t; + + // ---------------------------------------------------------------- + // 1. No cancellation: normal execution still works + // ---------------------------------------------------------------- + SECTION("no cancel: noop task completes") { + auto recv = lf::schedule(scheduler, noop_task); + REQUIRE(recv.valid()); + std::move(recv).get(); + } + + SECTION("no cancel: value task returns correct value") { + auto recv = lf::schedule(scheduler, returning_task); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get() == 99); + } + + // ---------------------------------------------------------------- + // 2. Pre-cancelled token: child runs but its own sub-children are skipped + // + // The cancel token is passed ONLY to the inner child (scope::call(&tok, + // inner_with_cancel)). The root's cancel chain is nullptr, so the root's + // await_transform succeeds. Inside inner_with_cancel, is_cancelled() + // returns true (its cancel=&tok, tok stopped), so counting_task is skipped. + // ---------------------------------------------------------------- + SECTION("pre-cancel: grandchild is skipped") { + cancellation tok; + tok.request_stop(); + std::atomic grandchild{0}; + + auto recv = lf::schedule(scheduler, outer_with_cancel, &tok, &grandchild); + REQUIRE(recv.valid()); + std::move(recv).get(); + REQUIRE(grandchild.load() == 0); + } + + // ---------------------------------------------------------------- + // 3. 
Token signalled from within a FORKED child, then join (steals=0 path) + // + // This exercises: + // (a) fork: child inline-completes before parent reaches join + // (b) join_awaitable::await_ready → false (steals=0, is_cancelled) + // (c) join_awaitable::await_suspend steals=0 special-case (bug fix) + // (d) handle_cancel → final_suspend cascade + // + // post_join_ran must stay 0: code after co_await join() is unreachable. + // ---------------------------------------------------------------- + SECTION("fork-cancel: post-join code unreachable after cancel (steals=0 path)") { + cancellation tok; + std::atomic post_join_ran{0}; + + auto recv = lf::schedule(scheduler, fork_outer, &tok, &post_join_ran); + REQUIRE(recv.valid()); + std::move(recv).get(); + REQUIRE(post_join_ran.load() == 0); + } + + // ---------------------------------------------------------------- + // 4. Second fork is skipped when token is already stopped at await_transform + // + // fork_two_children: first fork signals tok, second fork is skipped + // because the parent's await_transform checks is_cancelled() → true. + // ---------------------------------------------------------------- + SECTION("fork-cancel: second fork skipped when token already stopped") { + cancellation tok; + std::atomic second_ran{0}; + + auto recv = lf::schedule(scheduler, fork_two_outer, &tok, &second_ran); + REQUIRE(recv.valid()); + std::move(recv).get(); + REQUIRE(second_ran.load() == 0); + } + + // ---------------------------------------------------------------- + // 5. Return value is default-initialised when task is cancelled before + // co_return + // + // inner_returning cascades at the join (via fork+signal); outer_returning + // writes the (never-set) return value, which stays 0. 
+ // ---------------------------------------------------------------- + SECTION("cancel before return: receiver holds default-initialised value") { + cancellation tok; + + auto recv = lf::schedule(scheduler, outer_returning, &tok); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get() == 0); + } + +#if LF_COMPILER_EXCEPTIONS + // ---------------------------------------------------------------- + // 6. Exception in forked child that also signals cancel + // + // The child throws AND signals tok. When the parent's join cascades via + // handle_cancel(), it discards the stashed exception (std::ignore = + // extract_exception). The receiver must complete without throwing. + // ---------------------------------------------------------------- + SECTION("cancel cleans up stashed exception: receiver does not throw") { + cancellation tok; + + auto recv = lf::schedule(scheduler, fork_throw_outer, &tok); + REQUIRE(recv.valid()); + REQUIRE_NOTHROW(std::move(recv).get()); + } +#endif // LF_COMPILER_EXCEPTIONS +} + +} // namespace + +// ============================================================ +// Token unit tests (no scheduler required) +// ============================================================ + +TEST_CASE("cancellation token: initial state is not stopped", "[cancel]") { + cancellation tok; + REQUIRE_FALSE(tok.stop_requested()); +} + +TEST_CASE("cancellation token: request_stop sets stopped", "[cancel]") { + cancellation tok; + tok.request_stop(); + REQUIRE(tok.stop_requested()); +} + +TEST_CASE("cancellation token: request_stop is idempotent", "[cancel]") { + cancellation tok; + tok.request_stop(); + tok.request_stop(); + REQUIRE(tok.stop_requested()); +} + +TEST_CASE("cancellation token: chain — neither stopped", "[cancel]") { + cancellation parent; + cancellation child{.parent = &parent}; + REQUIRE_FALSE(parent.stop_requested()); + REQUIRE_FALSE(child.stop_requested()); +} + +TEST_CASE("cancellation token: chain — stopping child does not affect parent", 
"[cancel]") { + cancellation parent; + cancellation child{.parent = &parent}; + child.request_stop(); + REQUIRE(child.stop_requested()); + REQUIRE_FALSE(parent.stop_requested()); // parent unaffected +} + +TEST_CASE("cancellation token: chain — stopping parent does not affect child's own flag", "[cancel]") { + cancellation parent; + cancellation child{.parent = &parent}; + parent.request_stop(); + // stop_requested() only checks this node's flag: + REQUIRE(parent.stop_requested()); + REQUIRE_FALSE(child.stop_requested()); + // But is_cancelled() (on a frame that holds a chain through child → parent) + // would return true. That logic is tested indirectly via the scheduler tests. +} + +// ============================================================ +// Schedule-based tests +// ============================================================ + +TEMPLATE_TEST_CASE("Inline cancellation", "[cancel]", mono_inline_ctx, poly_inline_ctx) { + lf::inline_scheduler scheduler; + run_cancel_tests(scheduler); +} + +TEMPLATE_TEST_CASE("Busy-pool cancellation", "[cancel]", mono_busy_pool, poly_busy_pool) { + STATIC_REQUIRE(lf::scheduler); + for (std::size_t thr = 1; thr < 4; ++thr) { + TestType pool{thr}; + run_cancel_tests(pool); + } +} From 5d21d850dfde488049c231911973f57ec0bc9c2f Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 13:31:31 +0100 Subject: [PATCH 051/123] join cancel fix --- src/core/promise.cxx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index e911d6d3..d70d7fe2 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -386,6 +386,14 @@ struct join_awaitable { LF_ASSUME(self.frame); + // Special case: steals==0 means await_ready returned false only because + // is_cancelled() is true. We are the exclusive owner of the frame and stack; + // take_stack_and_reset() would falsely assert we don't own the stack. 
+ if (self.frame->steals == 0) [[unlikely]] { + self.frame->reset_counters(); + return self.handle_cancel(); + } + std::uint32_t steals = self.frame->steals; std::uint32_t offset = k_u16_max - steals; std::uint32_t joined = self.frame->atomic_joins().fetch_sub(offset, std::memory_order_release); From 1a9dafd84378ba73e670062b7c953a5a665000fe Mon Sep 17 00:00:00 2001 From: Conor Date: Thu, 16 Apr 2026 17:07:30 +0100 Subject: [PATCH 052/123] cancel fuzz --- test/src/cancel_fuzz.cpp | 417 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 417 insertions(+) create mode 100644 test/src/cancel_fuzz.cpp diff --git a/test/src/cancel_fuzz.cpp b/test/src/cancel_fuzz.cpp new file mode 100644 index 00000000..c9d28dfa --- /dev/null +++ b/test/src/cancel_fuzz.cpp @@ -0,0 +1,417 @@ +#include +#include + +#include "libfork/__impl/exception.hpp" + +import std; + +import libfork; + +// ============================================================ +// Task-tree specification +// ============================================================ + +namespace { + +// Describes a single node in a randomly generated task tree. +// +// cancel_mode — how the PARENT creates this node: +// Inherit : scope::call/fork(fn) — child shares parent's cancel token +// New : scope::call/fork(&tok, fn) — child gets a fresh isolated token +// +// child_kind — how THIS node creates ITS children: +// Call : sequential scope::call, no explicit join +// Fork : parallel scope::fork + co_await join() +// +// signal_cancel — stop my own cancel token BEFORE spawning children. +// +// is_leaf() — true when children is empty. Leaf tasks increment the counter. +// +// Internal Fork nodes also increment the counter AFTER co_await join() (only +// if the join did not cascade due to cancellation). This lets the fuzz test +// verify cascade behaviour independently of leaf counts. 
+ +struct NodeSpec { + enum class CancelMode : std::uint8_t { Inherit, New }; + enum class ChildKind : std::uint8_t { Call, Fork }; + + CancelMode cancel_mode = CancelMode::Inherit; + ChildKind child_kind = ChildKind::Call; + bool signal_cancel = false; + std::vector children; + + [[nodiscard]] + bool is_leaf() const noexcept { + return children.empty(); + } +}; + +// ============================================================ +// Random tree generation +// ============================================================ + +NodeSpec gen_node(std::mt19937 &rng, int depth, int max_depth) { + NodeSpec n; + + std::bernoulli_distribution coin{0.5}; + std::bernoulli_distribution rare{0.2}; + std::bernoulli_distribution very_rare{0.08}; + + // Leaves: at max depth or by chance + if (depth >= max_depth || rare(rng)) { + return n; // leaf (no children set) + } + + n.cancel_mode = coin(rng) ? NodeSpec::CancelMode::Inherit : NodeSpec::CancelMode::New; + n.child_kind = coin(rng) ? NodeSpec::ChildKind::Fork : NodeSpec::ChildKind::Call; + n.signal_cancel = very_rare(rng); + + std::uniform_int_distribution nc_dist{1, 4}; + int nc = nc_dist(rng); + n.children.reserve(static_cast(nc)); + for (int i = 0; i < nc; ++i) { + n.children.push_back(gen_node(rng, depth + 1, max_depth)); + } + return n; +} + +// ============================================================ +// Reference simulation (inline-scheduler semantics) +// +// simulate() returns the expected counter value contributed by this subtree. +// +// Precondition: the parent's token was NOT stopped when we were created +// (the caller already verified this with its own stopped[] check). +// +// stopped[id] tracks whether token `id` has been stop-requested. +// next_id is the allocator for fresh token ids. 
+// +// Token semantics: +// Inherit → child shares my_tok (same id) +// New → child gets a new id, initially false, with no link to my_tok +// +// inline-scheduler execution order (sequential, depth-first): +// For Fork mode: each child runs inline before parent resumes. +// After child i completes, stopped[my_tok] is re-checked before child i+1 +// (this is the await_transform check in the parent). +// For Call mode: identical ordering, no explicit join. +// Fork cascade: if stopped[my_tok] is true after all children, the +// post-join counter increment is suppressed. +// ============================================================ + +int simulate(const NodeSpec &spec, int my_tok, std::vector &stopped, int &next_id) { + + if (spec.signal_cancel) { + stopped[static_cast(my_tok)] = true; + } + + if (spec.is_leaf()) { + return 1; // leaf increments counter + } + + int total = 0; + bool is_fork = (spec.child_kind == NodeSpec::ChildKind::Fork); + + for (const auto &child : spec.children) { + // await_transform in parent: if my token stopped, skip this child + if (stopped[static_cast(my_tok)]) { + break; + } + + // Allocate the child's token + int child_tok; + if (child.cancel_mode == NodeSpec::CancelMode::Inherit) { + child_tok = my_tok; // shared + } else { + child_tok = next_id++; + stopped.push_back(false); + } + + total += simulate(child, child_tok, stopped, next_id); + // After child returns, stopped[my_tok] might be true if child (or a + // descendant) stopped an inherited token. The next loop iteration's + // stopped[] check handles this correctly. 
+ } + + if (is_fork) { + // Post-join: runs only if join did NOT cascade (my_tok not stopped) + if (!stopped[static_cast(my_tok)]) { + ++total; + } + } + // Call mode: no post-join observable (call always returns to parent) + + return total; +} + +int expected_count(const NodeSpec &root) { + // The root always gets a fresh token (provided by fuzz_root below) + std::vector stopped{false}; // id 0 = root token, initially not stopped + int next_id = 1; + return simulate(root, 0, stopped, next_id); +} + +// ============================================================ +// Upper bound for busy-pool execution +// +// With a thread pool, a steal can cause the parent to fork sibling B before +// sibling A has had a chance to run and signal the shared cancel token. So +// the actual counter may be *higher* than the sequential simulation. +// +// The upper bound is the count produced when no cancellation takes effect at +// all: every leaf runs and every Fork-internal node reaches its post-join +// increment. It equals leaves(tree) + fork_internals(tree). +// +// Why this is always ≥ concurrent: +// Cancellation can only suppress tasks (lower the counter), never add them. +// +// Why sequential (min) ≤ concurrent: +// In sequential, task T is skipped only when a prior sibling already +// stopped the shared token before T's await_transform check. With a busy +// pool the parent can be stolen and fork T *before* that sibling runs, so T +// is created. Concurrent execution is therefore a superset of sequential. +// +// The post-join increment is DETERMINISTIC regardless of scheduling: +// The join winner's memory_order_acquire fence synchronises with all +// children's request_stop() (memory_order_release), so the join always +// sees every signal that originated inside the fork region. If any child +// (in any scheduling order) signalled the token, the join cascades. +// This means concurrent ≥ sequential may be entirely due to extra leaves. 
+// ============================================================ + +int max_count(const NodeSpec &node) { + if (node.is_leaf()) { + return 1; // leaf always increments + } + int total = 0; + for (const auto &child : node.children) { + total += max_count(child); + } + if (node.child_kind == NodeSpec::ChildKind::Fork) { + ++total; // post-join always reached when no cancellation + } + return total; +} + +// ============================================================ +// libfork execution +// ============================================================ + +using lf::cancellation; +using lf::env; +using lf::task; + +// Forward declaration: execute_node is mutually recursive with itself. +template +auto execute_node(env, const NodeSpec *, cancellation *, std::atomic *) -> task; + +// Root wrapper: schedule() always creates the root task with cancel=nullptr. +// We give the root node its own fresh token so the fuzz spec can signal it. +template +auto fuzz_root(env, const NodeSpec *spec, std::atomic *counter) -> task { + cancellation root_tok; + using S = lf::scope; + co_await S::call(&root_tok, execute_node, spec, &root_tok, counter); + co_return; +} + +// Interprets a NodeSpec subtree as libfork coroutines. +// +// my_tok — the cancel token this task was bound to (either inherited from parent +// or a fresh token created by the parent for this node). This matches +// self.frame.cancel for the Inherit case, and the explicit token for +// the New case. 
+// +// Counter semantics (must match reference simulation): +// Leaf → counter++ unconditionally +// Internal Fork, no cascade → counter++ after co_await join() +// Internal Fork, cascade → post-join code not reached (frame destroyed) +// Internal Call → no counter increment (pure structure) +template +auto execute_node(env, const NodeSpec *spec, cancellation *my_tok, std::atomic *counter) + -> task { + using S = lf::scope; + + // Signal before children (matches reference: stopped[my_tok]=true before loop) + if (spec->signal_cancel) { + my_tok->request_stop(); + } + + if (spec->is_leaf()) { + counter->fetch_add(1, std::memory_order_relaxed); + co_return; + } + + // Heap-allocate tokens for New-mode children. Lifetime: this coroutine + // frame, which outlives all children (they complete before or at the join). + auto child_toks = std::make_unique(spec->children.size()); + + if (spec->child_kind == NodeSpec::ChildKind::Fork) { + + for (std::size_t i = 0; i < spec->children.size(); ++i) { + const auto &ch = spec->children[i]; + // await_transform checks self.frame.is_cancelled() before creating child. + // If my_tok is stopped the awaitable returns {.child=nullptr} and the + // co_await resumes the parent immediately (same as a no-op). The loop + // continues but all remaining co_awaits also short-circuit. 
+ if (ch.cancel_mode == NodeSpec::CancelMode::Inherit) { + // Child inherits my_tok: no explicit cancel arg + co_await S::fork(execute_node, &ch, my_tok, counter); + } else { + // Child gets fresh isolated token + co_await S::fork(&child_toks[i], execute_node, &ch, &child_toks[i], counter); + } + } + + co_await lf::join(); + + // Post-join: counter increment only if we reach here (join did not cascade) + counter->fetch_add(1, std::memory_order_relaxed); + + } else { // Call mode + + for (std::size_t i = 0; i < spec->children.size(); ++i) { + const auto &ch = spec->children[i]; + if (ch.cancel_mode == NodeSpec::CancelMode::Inherit) { + co_await S::call(execute_node, &ch, my_tok, counter); + } else { + co_await S::call(&child_toks[i], execute_node, &ch, &child_toks[i], counter); + } + } + // No post-join for Call (no explicit join point, call always returns) + } + + co_return; +} + +// ============================================================ +// Fuzz runners +// ============================================================ + +// Exact check — correct for the inline scheduler (deterministic, no stealing). +template +void run_fuzz_exact(Sch &scheduler, std::mt19937 &rng, int n_trees, int max_depth) { + using Ctx = lf::context_t; + + for (int t = 0; t < n_trees; ++t) { + NodeSpec root = gen_node(rng, 0, max_depth); + int expected = expected_count(root); + + std::atomic counter{0}; + auto recv = lf::schedule(scheduler, fuzz_root, &root, &counter); + REQUIRE(recv.valid()); + std::move(recv).get(); + + int actual = counter.load(); + if (actual != expected) { + FAIL("exact mismatch: expected " << expected << " got " << actual << " (depth " << max_depth + << ", iter " << t << ")"); + } + } +} + +// Range check — correct for the busy pool (concurrent, non-deterministic cancel races). +// +// Invariant: expected_count(root) ≤ actual ≤ max_count(root) +// +// Lower bound (expected_count): sequential simulation, signals maximally observed. 
+// Upper bound (max_count): no cancellation at all, every task runs. +// +// Post-join increments are fully deterministic due to the acquire fence in the +// join winner: if any child signalled the token the post-join is suppressed in +// EVERY scheduling, not just sequential. The non-determinism is confined to +// whether sibling forks were created before a prior sibling's signal propagated. +template +void run_fuzz_range(Sch &scheduler, std::mt19937 &rng, int n_trees, int max_depth) { + using Ctx = lf::context_t; + + for (int t = 0; t < n_trees; ++t) { + NodeSpec root = gen_node(rng, 0, max_depth); + int lo = expected_count(root); + int hi = max_count(root); + + std::atomic counter{0}; + auto recv = lf::schedule(scheduler, fuzz_root, &root, &counter); + REQUIRE(recv.valid()); + std::move(recv).get(); + + int actual = counter.load(); + if (actual < lo || actual > hi) { + FAIL("range violation: " << lo << " ≤ actual ≤ " << hi << " but got " << actual << " (depth " + << max_depth << ", iter " << t << ")"); + } + } +} + +} // namespace + +// ============================================================ +// Test cases +// ============================================================ + +using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; +using poly_inline_ctx = lf::derived_poly_context, lf::adapt_vector>; +using mono_busy_pool = lf::mono_busy_pool>; +using poly_busy_pool = lf::poly_busy_pool>; + +// Inline: deterministic execution, exact expected-count check. +TEMPLATE_TEST_CASE("Cancellation fuzz: random task trees (inline)", "[cancel][fuzz]", mono_inline_ctx, + poly_inline_ctx) { + + lf::inline_scheduler scheduler; + + SECTION("fixed seed, shallow trees (reproducible)") { + std::mt19937 rng{0xDEAD'BEEF}; + run_fuzz_exact(scheduler, rng, 2000, 4); + } + + SECTION("random seed, deeper trees") { + std::mt19937 rng{std::random_device{}()}; + run_fuzz_exact(scheduler, rng, 500, 6); + } +} + +// Busy pool: concurrent execution, range check [min, max]. 
+// +// min = sequential simulation (signals observed maximally = fewest tasks run) +// max = no-cancel simulation (signals ignored = most tasks run) +// +// Invariant proof sketch: +// actual ≤ max: cancellation suppresses tasks; removing signals can only add. +// actual ≥ min: stealing lets the parent fork sibling B before sibling A +// runs and signals; concurrent execution is a superset of sequential. +// Post-join determinism: the join winner's acquire fence synchronises with +// every child's request_stop() release; the join always sees all signals, +// so the post-join increment is suppressed in every scheduling if any +// child signalled — not just in the sequential case. +TEMPLATE_TEST_CASE("Cancellation fuzz: random task trees (busy pool)", "[cancel][fuzz]", mono_busy_pool, + poly_busy_pool) { + + STATIC_REQUIRE(lf::scheduler); + + SECTION("fixed seed, 1 thread (sequential, exact check degenerates to range check)") { + TestType pool{1}; + std::mt19937 rng{0xDEAD'BEEF}; + run_fuzz_range(pool, rng, 500, 4); + } + + SECTION("fixed seed, 2 threads") { + TestType pool{2}; + std::mt19937 rng{0xCAFE'BABE}; + run_fuzz_range(pool, rng, 300, 4); + } + + SECTION("fixed seed, 4 threads") { + TestType pool{4}; + std::mt19937 rng{0xDEAD'C0DE}; + run_fuzz_range(pool, rng, 300, 4); + } + + SECTION("random seed, variable threads") { + std::mt19937 rng{std::random_device{}()}; + for (std::size_t thr = 1; thr <= 4; ++thr) { + TestType pool{thr}; + run_fuzz_range(pool, rng, 100, 4); + } + } +} From dd5f712dfe9355c84fb4dbf9d475ca03d44a9ee6 Mon Sep 17 00:00:00 2001 From: Conor Date: Thu, 16 Apr 2026 17:07:12 +0100 Subject: [PATCH 053/123] fix conditional --- src/core/ops.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index e74392b4..f5ee7d6c 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -24,7 +24,7 @@ struct maybe_ptr {}; template struct [[nodiscard("You should immediately co_await this!")]] pkg { - 
[[no_unique_address]] maybe_ptr<0, std::conditional> maybe_cancel; + [[no_unique_address]] maybe_ptr<0, std::conditional_t> maybe_cancel; [[no_unique_address]] maybe_ptr<1, R> maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; From 936d2c5e7f868f8849c789da01aadd62b0423e48 Mon Sep 17 00:00:00 2001 From: Conor Date: Thu, 16 Apr 2026 17:06:44 +0100 Subject: [PATCH 054/123] agents tweaks --- AGENTS.md | 40 +--------------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 4cbaa5de..d25a557c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -103,34 +103,6 @@ All tests should pass. If tests fail, check that: - Build completed without errors - Any changes you have made are correct -## Linting & Validation - -The CI runs two linting tools that you should run before committing: - -### codespell (spelling) - -```bash -codespell -``` - -Config: `.codespellrc` (ignores: build/, .git/, etc.) -Should produce no output if passing. - -### clang-format (code formatting) - -```bash -find src include test benchmark/src -name "*.cpp" -o -name "*.hpp" -o -name "*.cxx" | xargs clang-format --dry-run --Werror -``` - -Config: `.clang-format` (110 column limit, specific style) -Should produce no output if passing. 
- -**To auto-fix formatting**: - -```bash -find src include test benchmark/src -name "*.cpp" -o -name "*.hpp" -o -name "*.cxx" | xargs clang-format -i -``` - ## Project Structure ### Source Layout @@ -150,10 +122,9 @@ libfork/ │ ├── batteries/ # libfork.batteries — stacks, contexts, adaptors │ │ ├── batteries.cxx # aggregator │ │ └── *.cxx # :partitions -│ ├── schedulers/ # libfork.schedulers — concrete schedulers +│ └── schedulers/ # libfork.schedulers — concrete schedulers │ │ ├── schedulers.cxx # aggregator │ │ └── *.cxx # :partitions -│ └── exception.cpp # terminate_with() implementation ├── test/src/**/ # Test suite (Catch2) — uses `import libfork;` │ └── *.cpp ├── benchmark/src/ # Benchmarking suite (google-benchmark) @@ -189,7 +160,6 @@ All workflows follow this pattern: 1. **Modify source files** in `src/`, `include/`, `test/`, or `benchmark/` 2. **Rebuild**: `cmake --build --preset ` 3. **Test**: `ctest --preset ` -4. **Lint**: Run codespell and clang-format checks #### Adding/removing files from `src/` or `include/` @@ -230,11 +200,3 @@ rm -rf build/ **Problem**: "Could not automatically find libc++.modules.json" **Solution**: Ensure LLVM is installed via Homebrew; toolchain auto-detects the path - -### Linting Failures - -**Problem**: clang-format errors -**Solution**: Run fix command above to auto-format code - -**Problem**: codespell errors -**Solution**: Fix typos or add to ignore list in `.codespellrc` if false positive From 84e7d5ed492bfe04bafb9dd725de19168c9e2d35 Mon Sep 17 00:00:00 2001 From: Conor Date: Thu, 16 Apr 2026 17:07:33 +0100 Subject: [PATCH 055/123] Revert "cancel fuzz" This reverts commit 7de5615aa73c26cf33f2e064e7501718fb1f1104. 
--- test/src/cancel_fuzz.cpp | 417 --------------------------------------- 1 file changed, 417 deletions(-) delete mode 100644 test/src/cancel_fuzz.cpp diff --git a/test/src/cancel_fuzz.cpp b/test/src/cancel_fuzz.cpp deleted file mode 100644 index c9d28dfa..00000000 --- a/test/src/cancel_fuzz.cpp +++ /dev/null @@ -1,417 +0,0 @@ -#include -#include - -#include "libfork/__impl/exception.hpp" - -import std; - -import libfork; - -// ============================================================ -// Task-tree specification -// ============================================================ - -namespace { - -// Describes a single node in a randomly generated task tree. -// -// cancel_mode — how the PARENT creates this node: -// Inherit : scope::call/fork(fn) — child shares parent's cancel token -// New : scope::call/fork(&tok, fn) — child gets a fresh isolated token -// -// child_kind — how THIS node creates ITS children: -// Call : sequential scope::call, no explicit join -// Fork : parallel scope::fork + co_await join() -// -// signal_cancel — stop my own cancel token BEFORE spawning children. -// -// is_leaf() — true when children is empty. Leaf tasks increment the counter. -// -// Internal Fork nodes also increment the counter AFTER co_await join() (only -// if the join did not cascade due to cancellation). This lets the fuzz test -// verify cascade behaviour independently of leaf counts. 
- -struct NodeSpec { - enum class CancelMode : std::uint8_t { Inherit, New }; - enum class ChildKind : std::uint8_t { Call, Fork }; - - CancelMode cancel_mode = CancelMode::Inherit; - ChildKind child_kind = ChildKind::Call; - bool signal_cancel = false; - std::vector children; - - [[nodiscard]] - bool is_leaf() const noexcept { - return children.empty(); - } -}; - -// ============================================================ -// Random tree generation -// ============================================================ - -NodeSpec gen_node(std::mt19937 &rng, int depth, int max_depth) { - NodeSpec n; - - std::bernoulli_distribution coin{0.5}; - std::bernoulli_distribution rare{0.2}; - std::bernoulli_distribution very_rare{0.08}; - - // Leaves: at max depth or by chance - if (depth >= max_depth || rare(rng)) { - return n; // leaf (no children set) - } - - n.cancel_mode = coin(rng) ? NodeSpec::CancelMode::Inherit : NodeSpec::CancelMode::New; - n.child_kind = coin(rng) ? NodeSpec::ChildKind::Fork : NodeSpec::ChildKind::Call; - n.signal_cancel = very_rare(rng); - - std::uniform_int_distribution nc_dist{1, 4}; - int nc = nc_dist(rng); - n.children.reserve(static_cast(nc)); - for (int i = 0; i < nc; ++i) { - n.children.push_back(gen_node(rng, depth + 1, max_depth)); - } - return n; -} - -// ============================================================ -// Reference simulation (inline-scheduler semantics) -// -// simulate() returns the expected counter value contributed by this subtree. -// -// Precondition: the parent's token was NOT stopped when we were created -// (the caller already verified this with its own stopped[] check). -// -// stopped[id] tracks whether token `id` has been stop-requested. -// next_id is the allocator for fresh token ids. 
-// -// Token semantics: -// Inherit → child shares my_tok (same id) -// New → child gets a new id, initially false, with no link to my_tok -// -// inline-scheduler execution order (sequential, depth-first): -// For Fork mode: each child runs inline before parent resumes. -// After child i completes, stopped[my_tok] is re-checked before child i+1 -// (this is the await_transform check in the parent). -// For Call mode: identical ordering, no explicit join. -// Fork cascade: if stopped[my_tok] is true after all children, the -// post-join counter increment is suppressed. -// ============================================================ - -int simulate(const NodeSpec &spec, int my_tok, std::vector &stopped, int &next_id) { - - if (spec.signal_cancel) { - stopped[static_cast(my_tok)] = true; - } - - if (spec.is_leaf()) { - return 1; // leaf increments counter - } - - int total = 0; - bool is_fork = (spec.child_kind == NodeSpec::ChildKind::Fork); - - for (const auto &child : spec.children) { - // await_transform in parent: if my token stopped, skip this child - if (stopped[static_cast(my_tok)]) { - break; - } - - // Allocate the child's token - int child_tok; - if (child.cancel_mode == NodeSpec::CancelMode::Inherit) { - child_tok = my_tok; // shared - } else { - child_tok = next_id++; - stopped.push_back(false); - } - - total += simulate(child, child_tok, stopped, next_id); - // After child returns, stopped[my_tok] might be true if child (or a - // descendant) stopped an inherited token. The next loop iteration's - // stopped[] check handles this correctly. 
- } - - if (is_fork) { - // Post-join: runs only if join did NOT cascade (my_tok not stopped) - if (!stopped[static_cast(my_tok)]) { - ++total; - } - } - // Call mode: no post-join observable (call always returns to parent) - - return total; -} - -int expected_count(const NodeSpec &root) { - // The root always gets a fresh token (provided by fuzz_root below) - std::vector stopped{false}; // id 0 = root token, initially not stopped - int next_id = 1; - return simulate(root, 0, stopped, next_id); -} - -// ============================================================ -// Upper bound for busy-pool execution -// -// With a thread pool, a steal can cause the parent to fork sibling B before -// sibling A has had a chance to run and signal the shared cancel token. So -// the actual counter may be *higher* than the sequential simulation. -// -// The upper bound is the count produced when no cancellation takes effect at -// all: every leaf runs and every Fork-internal node reaches its post-join -// increment. It equals leaves(tree) + fork_internals(tree). -// -// Why this is always ≥ concurrent: -// Cancellation can only suppress tasks (lower the counter), never add them. -// -// Why sequential (min) ≤ concurrent: -// In sequential, task T is skipped only when a prior sibling already -// stopped the shared token before T's await_transform check. With a busy -// pool the parent can be stolen and fork T *before* that sibling runs, so T -// is created. Concurrent execution is therefore a superset of sequential. -// -// The post-join increment is DETERMINISTIC regardless of scheduling: -// The join winner's memory_order_acquire fence synchronises with all -// children's request_stop() (memory_order_release), so the join always -// sees every signal that originated inside the fork region. If any child -// (in any scheduling order) signalled the token, the join cascades. -// This means concurrent ≥ sequential may be entirely due to extra leaves. 
-// ============================================================ - -int max_count(const NodeSpec &node) { - if (node.is_leaf()) { - return 1; // leaf always increments - } - int total = 0; - for (const auto &child : node.children) { - total += max_count(child); - } - if (node.child_kind == NodeSpec::ChildKind::Fork) { - ++total; // post-join always reached when no cancellation - } - return total; -} - -// ============================================================ -// libfork execution -// ============================================================ - -using lf::cancellation; -using lf::env; -using lf::task; - -// Forward declaration: execute_node is mutually recursive with itself. -template -auto execute_node(env, const NodeSpec *, cancellation *, std::atomic *) -> task; - -// Root wrapper: schedule() always creates the root task with cancel=nullptr. -// We give the root node its own fresh token so the fuzz spec can signal it. -template -auto fuzz_root(env, const NodeSpec *spec, std::atomic *counter) -> task { - cancellation root_tok; - using S = lf::scope; - co_await S::call(&root_tok, execute_node, spec, &root_tok, counter); - co_return; -} - -// Interprets a NodeSpec subtree as libfork coroutines. -// -// my_tok — the cancel token this task was bound to (either inherited from parent -// or a fresh token created by the parent for this node). This matches -// self.frame.cancel for the Inherit case, and the explicit token for -// the New case. 
-// -// Counter semantics (must match reference simulation): -// Leaf → counter++ unconditionally -// Internal Fork, no cascade → counter++ after co_await join() -// Internal Fork, cascade → post-join code not reached (frame destroyed) -// Internal Call → no counter increment (pure structure) -template -auto execute_node(env, const NodeSpec *spec, cancellation *my_tok, std::atomic *counter) - -> task { - using S = lf::scope; - - // Signal before children (matches reference: stopped[my_tok]=true before loop) - if (spec->signal_cancel) { - my_tok->request_stop(); - } - - if (spec->is_leaf()) { - counter->fetch_add(1, std::memory_order_relaxed); - co_return; - } - - // Heap-allocate tokens for New-mode children. Lifetime: this coroutine - // frame, which outlives all children (they complete before or at the join). - auto child_toks = std::make_unique(spec->children.size()); - - if (spec->child_kind == NodeSpec::ChildKind::Fork) { - - for (std::size_t i = 0; i < spec->children.size(); ++i) { - const auto &ch = spec->children[i]; - // await_transform checks self.frame.is_cancelled() before creating child. - // If my_tok is stopped the awaitable returns {.child=nullptr} and the - // co_await resumes the parent immediately (same as a no-op). The loop - // continues but all remaining co_awaits also short-circuit. 
- if (ch.cancel_mode == NodeSpec::CancelMode::Inherit) { - // Child inherits my_tok: no explicit cancel arg - co_await S::fork(execute_node, &ch, my_tok, counter); - } else { - // Child gets fresh isolated token - co_await S::fork(&child_toks[i], execute_node, &ch, &child_toks[i], counter); - } - } - - co_await lf::join(); - - // Post-join: counter increment only if we reach here (join did not cascade) - counter->fetch_add(1, std::memory_order_relaxed); - - } else { // Call mode - - for (std::size_t i = 0; i < spec->children.size(); ++i) { - const auto &ch = spec->children[i]; - if (ch.cancel_mode == NodeSpec::CancelMode::Inherit) { - co_await S::call(execute_node, &ch, my_tok, counter); - } else { - co_await S::call(&child_toks[i], execute_node, &ch, &child_toks[i], counter); - } - } - // No post-join for Call (no explicit join point, call always returns) - } - - co_return; -} - -// ============================================================ -// Fuzz runners -// ============================================================ - -// Exact check — correct for the inline scheduler (deterministic, no stealing). -template -void run_fuzz_exact(Sch &scheduler, std::mt19937 &rng, int n_trees, int max_depth) { - using Ctx = lf::context_t; - - for (int t = 0; t < n_trees; ++t) { - NodeSpec root = gen_node(rng, 0, max_depth); - int expected = expected_count(root); - - std::atomic counter{0}; - auto recv = lf::schedule(scheduler, fuzz_root, &root, &counter); - REQUIRE(recv.valid()); - std::move(recv).get(); - - int actual = counter.load(); - if (actual != expected) { - FAIL("exact mismatch: expected " << expected << " got " << actual << " (depth " << max_depth - << ", iter " << t << ")"); - } - } -} - -// Range check — correct for the busy pool (concurrent, non-deterministic cancel races). -// -// Invariant: expected_count(root) ≤ actual ≤ max_count(root) -// -// Lower bound (expected_count): sequential simulation, signals maximally observed. 
-// Upper bound (max_count): no cancellation at all, every task runs. -// -// Post-join increments are fully deterministic due to the acquire fence in the -// join winner: if any child signalled the token the post-join is suppressed in -// EVERY scheduling, not just sequential. The non-determinism is confined to -// whether sibling forks were created before a prior sibling's signal propagated. -template -void run_fuzz_range(Sch &scheduler, std::mt19937 &rng, int n_trees, int max_depth) { - using Ctx = lf::context_t; - - for (int t = 0; t < n_trees; ++t) { - NodeSpec root = gen_node(rng, 0, max_depth); - int lo = expected_count(root); - int hi = max_count(root); - - std::atomic counter{0}; - auto recv = lf::schedule(scheduler, fuzz_root, &root, &counter); - REQUIRE(recv.valid()); - std::move(recv).get(); - - int actual = counter.load(); - if (actual < lo || actual > hi) { - FAIL("range violation: " << lo << " ≤ actual ≤ " << hi << " but got " << actual << " (depth " - << max_depth << ", iter " << t << ")"); - } - } -} - -} // namespace - -// ============================================================ -// Test cases -// ============================================================ - -using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; -using poly_inline_ctx = lf::derived_poly_context, lf::adapt_vector>; -using mono_busy_pool = lf::mono_busy_pool>; -using poly_busy_pool = lf::poly_busy_pool>; - -// Inline: deterministic execution, exact expected-count check. -TEMPLATE_TEST_CASE("Cancellation fuzz: random task trees (inline)", "[cancel][fuzz]", mono_inline_ctx, - poly_inline_ctx) { - - lf::inline_scheduler scheduler; - - SECTION("fixed seed, shallow trees (reproducible)") { - std::mt19937 rng{0xDEAD'BEEF}; - run_fuzz_exact(scheduler, rng, 2000, 4); - } - - SECTION("random seed, deeper trees") { - std::mt19937 rng{std::random_device{}()}; - run_fuzz_exact(scheduler, rng, 500, 6); - } -} - -// Busy pool: concurrent execution, range check [min, max]. 
-// -// min = sequential simulation (signals observed maximally = fewest tasks run) -// max = no-cancel simulation (signals ignored = most tasks run) -// -// Invariant proof sketch: -// actual ≤ max: cancellation suppresses tasks; removing signals can only add. -// actual ≥ min: stealing lets the parent fork sibling B before sibling A -// runs and signals; concurrent execution is a superset of sequential. -// Post-join determinism: the join winner's acquire fence synchronises with -// every child's request_stop() release; the join always sees all signals, -// so the post-join increment is suppressed in every scheduling if any -// child signalled — not just in the sequential case. -TEMPLATE_TEST_CASE("Cancellation fuzz: random task trees (busy pool)", "[cancel][fuzz]", mono_busy_pool, - poly_busy_pool) { - - STATIC_REQUIRE(lf::scheduler); - - SECTION("fixed seed, 1 thread (sequential, exact check degenerates to range check)") { - TestType pool{1}; - std::mt19937 rng{0xDEAD'BEEF}; - run_fuzz_range(pool, rng, 500, 4); - } - - SECTION("fixed seed, 2 threads") { - TestType pool{2}; - std::mt19937 rng{0xCAFE'BABE}; - run_fuzz_range(pool, rng, 300, 4); - } - - SECTION("fixed seed, 4 threads") { - TestType pool{4}; - std::mt19937 rng{0xDEAD'C0DE}; - run_fuzz_range(pool, rng, 300, 4); - } - - SECTION("random seed, variable threads") { - std::mt19937 rng{std::random_device{}()}; - for (std::size_t thr = 1; thr <= 4; ++thr) { - TestType pool{thr}; - run_fuzz_range(pool, rng, 100, 4); - } - } -} From 19dbf7acab55425bdc71827bde7a8723476a12f8 Mon Sep 17 00:00:00 2001 From: Conor Date: Thu, 16 Apr 2026 17:12:54 +0100 Subject: [PATCH 056/123] todo --- src/core/promise.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index d70d7fe2..e1bbcd7d 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -386,6 +386,8 @@ struct join_awaitable { LF_ASSUME(self.frame); + // TODO: fuse and simplify this path + // Special case: 
steals==0 means await_ready returned false only because // is_cancelled() is true. We are the exclusive owner of the frame and stack; // take_stack_and_reset() would falsely assert we don't own the stack. From 696dcfb0cd9834d1f2af68c1ec02a993a32b6292 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:10:46 +0100 Subject: [PATCH 057/123] first pass --- benchmark/CMakeLists.txt | 5 +---- benchmark/src/libfork_benchmark/uts/uts.hpp | 2 +- external/uts/CMakeLists.txt | 13 +++++++++++++ .../uts/external => external/uts}/rng/brg_endian.h | 0 .../uts/external => external/uts}/rng/brg_sha1.c | 0 .../uts/external => external/uts}/rng/brg_sha1.h | 0 .../uts/external => external/uts}/rng/brg_types.h | 0 .../uts/external => external/uts}/rng/rng.h | 0 .../uts/external => external/uts}/uts.c | 0 .../uts/external => external/uts}/uts.h | 0 10 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 external/uts/CMakeLists.txt rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/rng/brg_endian.h (100%) rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/rng/brg_sha1.c (100%) rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/rng/brg_sha1.h (100%) rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/rng/brg_types.h (100%) rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/rng/rng.h (100%) rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/uts.c (100%) rename {benchmark/src/libfork_benchmark/uts/external => external/uts}/uts.h (100%) diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt index 95d495b7..ba77ec53 100644 --- a/benchmark/CMakeLists.txt +++ b/benchmark/CMakeLists.txt @@ -36,10 +36,7 @@ target_sources(libfork_benchmark # C lib for UTS -add_library(uts_c OBJECT - src/libfork_benchmark/uts/external/uts.c - src/libfork_benchmark/uts/external/rng/brg_sha1.c -) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../external/uts external/uts) 
target_link_libraries(libfork_benchmark PRIVATE uts_c) diff --git a/benchmark/src/libfork_benchmark/uts/uts.hpp b/benchmark/src/libfork_benchmark/uts/uts.hpp index a54039bb..eb858200 100644 --- a/benchmark/src/libfork_benchmark/uts/uts.hpp +++ b/benchmark/src/libfork_benchmark/uts/uts.hpp @@ -2,7 +2,7 @@ // Include the C UTS library header first (it defines max/min macros that would // clash with std::max/std::min after import std). -#include "libfork_benchmark/uts/external/uts.h" +#include "uts.h" #undef max #undef min diff --git a/external/uts/CMakeLists.txt b/external/uts/CMakeLists.txt new file mode 100644 index 00000000..a7b73a8d --- /dev/null +++ b/external/uts/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.28 FATAL_ERROR) + +project(uts_external LANGUAGES C) + +add_library(uts_c OBJECT + uts.c + rng/brg_sha1.c +) + +target_include_directories(uts_c PUBLIC + $ + $ +) diff --git a/benchmark/src/libfork_benchmark/uts/external/rng/brg_endian.h b/external/uts/rng/brg_endian.h similarity index 100% rename from benchmark/src/libfork_benchmark/uts/external/rng/brg_endian.h rename to external/uts/rng/brg_endian.h diff --git a/benchmark/src/libfork_benchmark/uts/external/rng/brg_sha1.c b/external/uts/rng/brg_sha1.c similarity index 100% rename from benchmark/src/libfork_benchmark/uts/external/rng/brg_sha1.c rename to external/uts/rng/brg_sha1.c diff --git a/benchmark/src/libfork_benchmark/uts/external/rng/brg_sha1.h b/external/uts/rng/brg_sha1.h similarity index 100% rename from benchmark/src/libfork_benchmark/uts/external/rng/brg_sha1.h rename to external/uts/rng/brg_sha1.h diff --git a/benchmark/src/libfork_benchmark/uts/external/rng/brg_types.h b/external/uts/rng/brg_types.h similarity index 100% rename from benchmark/src/libfork_benchmark/uts/external/rng/brg_types.h rename to external/uts/rng/brg_types.h diff --git a/benchmark/src/libfork_benchmark/uts/external/rng/rng.h b/external/uts/rng/rng.h similarity index 100% rename from 
benchmark/src/libfork_benchmark/uts/external/rng/rng.h rename to external/uts/rng/rng.h diff --git a/benchmark/src/libfork_benchmark/uts/external/uts.c b/external/uts/uts.c similarity index 100% rename from benchmark/src/libfork_benchmark/uts/external/uts.c rename to external/uts/uts.c diff --git a/benchmark/src/libfork_benchmark/uts/external/uts.h b/external/uts/uts.h similarity index 100% rename from benchmark/src/libfork_benchmark/uts/external/uts.h rename to external/uts/uts.h From 5eeb0f2a660d61d35f6d7b2e90f6ab739c652c6d Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:33:33 +0100 Subject: [PATCH 058/123] second pass --- benchmark/src/libfork_benchmark/uts/uts.hpp | 2 +- external/uts/CMakeLists.txt | 24 ++++++++++++------- external/uts/{ => include/uts}/rng/brg_sha1.h | 0 .../uts/{ => include/uts}/rng/brg_types.h | 0 external/uts/{ => include/uts}/rng/rng.h | 0 external/uts/{ => include/uts}/uts.h | 0 external/uts/{ => src}/rng/brg_endian.h | 0 external/uts/{ => src}/rng/brg_sha1.c | 0 external/uts/{ => src}/uts.c | 2 +- 9 files changed, 18 insertions(+), 10 deletions(-) rename external/uts/{ => include/uts}/rng/brg_sha1.h (100%) rename external/uts/{ => include/uts}/rng/brg_types.h (100%) rename external/uts/{ => include/uts}/rng/rng.h (100%) rename external/uts/{ => include/uts}/uts.h (100%) rename external/uts/{ => src}/rng/brg_endian.h (100%) rename external/uts/{ => src}/rng/brg_sha1.c (100%) rename external/uts/{ => src}/uts.c (99%) diff --git a/benchmark/src/libfork_benchmark/uts/uts.hpp b/benchmark/src/libfork_benchmark/uts/uts.hpp index eb858200..1e5e1359 100644 --- a/benchmark/src/libfork_benchmark/uts/uts.hpp +++ b/benchmark/src/libfork_benchmark/uts/uts.hpp @@ -2,7 +2,7 @@ // Include the C UTS library header first (it defines max/min macros that would // clash with std::max/std::min after import std). 
-#include "uts.h" +#include "uts/uts.h" #undef max #undef min diff --git a/external/uts/CMakeLists.txt b/external/uts/CMakeLists.txt index a7b73a8d..4c6bd707 100644 --- a/external/uts/CMakeLists.txt +++ b/external/uts/CMakeLists.txt @@ -1,13 +1,21 @@ -cmake_minimum_required(VERSION 3.28 FATAL_ERROR) +cmake_minimum_required(VERSION 4.2.1 FATAL_ERROR) project(uts_external LANGUAGES C) -add_library(uts_c OBJECT - uts.c - rng/brg_sha1.c -) +add_library(uts_c OBJECT) -target_include_directories(uts_c PUBLIC - $ - $ +target_sources(uts_c + PRIVATE + src/uts.c + src/rng/brg_sha1.c + PUBLIC + FILE_SET HEADERS + BASE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include + FILES + include/uts/uts.h + include/uts/rng/rng.h + include/uts/rng/brg_sha1.h + include/uts/rng/brg_types.h ) + +target_include_directories(uts_c PRIVATE include) diff --git a/external/uts/rng/brg_sha1.h b/external/uts/include/uts/rng/brg_sha1.h similarity index 100% rename from external/uts/rng/brg_sha1.h rename to external/uts/include/uts/rng/brg_sha1.h diff --git a/external/uts/rng/brg_types.h b/external/uts/include/uts/rng/brg_types.h similarity index 100% rename from external/uts/rng/brg_types.h rename to external/uts/include/uts/rng/brg_types.h diff --git a/external/uts/rng/rng.h b/external/uts/include/uts/rng/rng.h similarity index 100% rename from external/uts/rng/rng.h rename to external/uts/include/uts/rng/rng.h diff --git a/external/uts/uts.h b/external/uts/include/uts/uts.h similarity index 100% rename from external/uts/uts.h rename to external/uts/include/uts/uts.h diff --git a/external/uts/rng/brg_endian.h b/external/uts/src/rng/brg_endian.h similarity index 100% rename from external/uts/rng/brg_endian.h rename to external/uts/src/rng/brg_endian.h diff --git a/external/uts/rng/brg_sha1.c b/external/uts/src/rng/brg_sha1.c similarity index 100% rename from external/uts/rng/brg_sha1.c rename to external/uts/src/rng/brg_sha1.c diff --git a/external/uts/uts.c b/external/uts/src/uts.c similarity index 99% 
rename from external/uts/uts.c rename to external/uts/src/uts.c index 7a110fcf..507915be 100644 --- a/external/uts/uts.c +++ b/external/uts/src/uts.c @@ -19,7 +19,7 @@ #include #include -#include "uts.h" +#include "uts/uts.h" /*********************************************************** * tree generation and search parameters * From 6d97e26b14a28dd1352727a38c46a5afaefa8977 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:42:30 +0100 Subject: [PATCH 059/123] touchup inculdes --- external/uts/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/external/uts/CMakeLists.txt b/external/uts/CMakeLists.txt index 4c6bd707..19451830 100644 --- a/external/uts/CMakeLists.txt +++ b/external/uts/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 4.2.1 FATAL_ERROR) project(uts_external LANGUAGES C) -add_library(uts_c OBJECT) +add_library(uts_c) target_sources(uts_c PRIVATE @@ -18,4 +18,4 @@ target_sources(uts_c include/uts/rng/brg_types.h ) -target_include_directories(uts_c PRIVATE include) +target_include_directories(uts_c PRIVATE include include/uts/rng) From c983c1d778cd5ad0449052dd548402b6fc3e6d8c Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:44:59 +0100 Subject: [PATCH 060/123] drop C from bench --- benchmark/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt index ba77ec53..d9b62030 100644 --- a/benchmark/CMakeLists.txt +++ b/benchmark/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 4.2.1 FATAL_ERROR) -project(libfork_benchmark LANGUAGES C CXX) +project(libfork_benchmark LANGUAGES CXX) if(NOT CMAKE_BUILD_TYPE STREQUAL "Release") message(WARNING "It is recommended to build benchmarks in Release mode for accurate results.") From 9a115c3e0cb757b1fe3fe58ceb435340e6626d18 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:52:13 +0100 Subject: [PATCH 061/123] no fixed import --- 
external/uts/CMakeLists.txt | 2 -- external/uts/src/rng/brg_sha1.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/external/uts/CMakeLists.txt b/external/uts/CMakeLists.txt index 19451830..89c8b5e6 100644 --- a/external/uts/CMakeLists.txt +++ b/external/uts/CMakeLists.txt @@ -17,5 +17,3 @@ target_sources(uts_c include/uts/rng/brg_sha1.h include/uts/rng/brg_types.h ) - -target_include_directories(uts_c PRIVATE include include/uts/rng) diff --git a/external/uts/src/rng/brg_sha1.c b/external/uts/src/rng/brg_sha1.c index 8c032f8b..f6757baf 100644 --- a/external/uts/src/rng/brg_sha1.c +++ b/external/uts/src/rng/brg_sha1.c @@ -37,7 +37,7 @@ #include /* for memcpy() etc. */ #include "brg_endian.h" -#include "brg_sha1.h" +#include "uts/rng/brg_sha1.h" #if defined(__cplusplus) extern "C" { From ef30fb2b7858b608d4e2b9acdf381a4ae7b05d5f Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:53:07 +0100 Subject: [PATCH 062/123] all use full imports --- external/uts/include/uts/rng/brg_sha1.h | 2 +- external/uts/include/uts/rng/rng.h | 2 +- external/uts/include/uts/uts.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/external/uts/include/uts/rng/brg_sha1.h b/external/uts/include/uts/rng/brg_sha1.h index b01a9f5d..d30f12c0 100644 --- a/external/uts/include/uts/rng/brg_sha1.h +++ b/external/uts/include/uts/rng/brg_sha1.h @@ -33,7 +33,7 @@ #ifndef _SHA1_H #define _SHA1_H -#include "brg_types.h" +#include "uts/rng/brg_types.h" #define SHA1_BLOCK_SIZE 64 #define SHA1_DIGEST_SIZE 20 diff --git a/external/uts/include/uts/rng/rng.h b/external/uts/include/uts/rng/rng.h index 30d99d81..105c4046 100644 --- a/external/uts/include/uts/rng/rng.h +++ b/external/uts/include/uts/rng/rng.h @@ -1,6 +1,6 @@ #ifndef _RNG_H #define _RNG_H -#include "brg_sha1.h" +#include "uts/rng/brg_sha1.h" #endif /* _RNG_H */ \ No newline at end of file diff --git a/external/uts/include/uts/uts.h b/external/uts/include/uts/uts.h index e7c8adee..e86e68f3 100644 --- 
a/external/uts/include/uts/uts.h +++ b/external/uts/include/uts/uts.h @@ -22,7 +22,7 @@ extern "C" { #endif - #include "rng/rng.h" + #include "uts/rng/rng.h" #define UTS_VERSION "2.1" From 6413dfc481d0de1773d658661a0ea5829b02253b Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 09:59:28 +0100 Subject: [PATCH 063/123] tidy up cmakelists --- CMakeLists.txt | 2 +- benchmark/CMakeLists.txt | 11 +++-------- todo.md | 19 ------------------- 3 files changed, 4 insertions(+), 28 deletions(-) delete mode 100644 todo.md diff --git a/CMakeLists.txt b/CMakeLists.txt index 6edf2467..3dbbfade 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ set(CMAKE_CXX_MODULE_STD 1) add_library(libfork_libfork) add_library(libfork::libfork ALIAS libfork_libfork) -# target_link_libraries(libfork_libfork PRIVATE Threads::Threads) +target_link_libraries(libfork_libfork PUBLIC Threads::Threads) set_property(TARGET libfork_libfork PROPERTY EXPORT_NAME libfork) diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt index d9b62030..e887edf4 100644 --- a/benchmark/CMakeLists.txt +++ b/benchmark/CMakeLists.txt @@ -17,8 +17,10 @@ target_link_libraries(libfork_benchmark benchmark::benchmark_main ) -# Common headers +# Common components target_sources(libfork_benchmark + PRIVATE + src/libfork_benchmark/uts/uts.cpp PRIVATE FILE_SET HEADERS FILES src/libfork_benchmark/common.hpp @@ -28,13 +30,6 @@ target_sources(libfork_benchmark src ) -# Common sources -target_sources(libfork_benchmark - PRIVATE - src/libfork_benchmark/uts/uts.cpp -) - - # C lib for UTS add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../external/uts external/uts) diff --git a/todo.md b/todo.md deleted file mode 100644 index 1183e6d1..00000000 --- a/todo.md +++ /dev/null @@ -1,19 +0,0 @@ -# TODO - -- [x] Context tag in API -- [x] Context in invocability concepts - -- Integrate geometric allocator (that can throw) - - [x] Initial impl - - [ ] Test correct throwing spec - -- [ ] Optimize release/resume in 
presence of steals (need benchmark) - -- [ ] `-fassume-nothrow-exception-dtor` - -- [ ] Test nothrow allocator performance (just terminate?) - -- [ ] Cancellation: - - [ ] Maybe in separate `co_await scope` - - [ ] Integrate into join - - [ ] Exception safety From bc609d76c7563146c5521fa86dabb785c3d19615 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 10:40:49 +0100 Subject: [PATCH 064/123] alternative handling of cancel at join --- src/core/promise.cxx | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index e1bbcd7d..a635de12 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -343,22 +343,20 @@ struct join_awaitable { frame_t *frame; - constexpr auto take_stack_and_reset(this join_awaitable self) noexcept -> void { + constexpr auto take_stack(this join_awaitable self) noexcept -> void { stack_t &stack = get_tls_stack(); LF_ASSUME(self.frame->stack_ckpt != stack.checkpoint()); stack.acquire(std::as_const(self.frame->stack_ckpt)); - self.frame->reset_counters(); } constexpr auto await_ready(this join_awaitable self) noexcept -> bool { - if (not_null(self.frame)->steals == 0) [[likely]] { - // If no steals then we are the only owner of the parent and we are - // ready to join. Therefore, no need to reset the control block. if (self.frame->is_cancelled()) [[unlikely]] { // Must unconditionally suspended if canceled return false; } + // If no steals then we are the only owner of the parent and we are + // ready to join. Therefore, no need to reset the control block. return true; } return false; @@ -386,32 +384,38 @@ struct join_awaitable { LF_ASSUME(self.frame); - // TODO: fuse and simplify this path - - // Special case: steals==0 means await_ready returned false only because - // is_cancelled() is true. We are the exclusive owner of the frame and stack; - // take_stack_and_reset() would falsely assert we don't own the stack. 
- if (self.frame->steals == 0) [[unlikely]] { - self.frame->reset_counters(); - return self.handle_cancel(); - } - std::uint32_t steals = self.frame->steals; std::uint32_t offset = k_u16_max - steals; std::uint32_t joined = self.frame->atomic_joins().fetch_sub(offset, std::memory_order_release); + // If this was a cancel: + // + // steals = 0, joins = k_u16_max then: + // + // steals = 0 + // offset = k_u16_max + // joined = k_u16_max, (self.frame->joins is now 0) + // + // k_u16_max - joined = 0 = steals, hence win the if + if (steals == k_u16_max - joined) { // We set joins after all children had completed therefore we can resume the task. // Need to acquire to ensure we see all writes by other threads to the result. std::atomic_thread_fence(std::memory_order_acquire); - // We must reset the control block and take the stack. We should never - // own the stack at this point because we must have stolen the stack. - self.take_stack_and_reset(); - if (self.frame->is_cancelled()) [[unlikely]] { + // Only take the stack if there were steals + if (steals > 0) { + self.take_stack(); + } + self.frame->reset_counters(); return self.handle_cancel(); } + + // We must reset the control block and take the stack. We should never + // own the stack at this point because we must have stolen the stack. + self.take_stack(); + self.frame->reset_counters(); return task; } // Someone else is responsible for running this task. 
From 947e203c64dcd1f09f6e3e170d6d4c45479c63a0 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 10:48:10 +0100 Subject: [PATCH 065/123] further refine cancel path --- src/core/promise.cxx | 53 ++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index a635de12..cbabfd24 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -343,12 +343,6 @@ struct join_awaitable { frame_t *frame; - constexpr auto take_stack(this join_awaitable self) noexcept -> void { - stack_t &stack = get_tls_stack(); - LF_ASSUME(self.frame->stack_ckpt != stack.checkpoint()); - stack.acquire(std::as_const(self.frame->stack_ckpt)); - } - constexpr auto await_ready(this join_awaitable self) noexcept -> bool { if (not_null(self.frame)->steals == 0) [[likely]] { if (self.frame->is_cancelled()) [[unlikely]] { @@ -404,11 +398,6 @@ struct join_awaitable { std::atomic_thread_fence(std::memory_order_acquire); if (self.frame->is_cancelled()) [[unlikely]] { - // Only take the stack if there were steals - if (steals > 0) { - self.take_stack(); - } - self.frame->reset_counters(); return self.handle_cancel(); } @@ -433,32 +422,48 @@ struct join_awaitable { return std::noop_coroutine(); } - [[nodiscard]] - constexpr auto handle_cancel(this join_awaitable self) -> coro<> { + constexpr void await_resume(this join_awaitable self) { + // We should have been reset + LF_ASSUME(self.frame->steals == 0); + LF_ASSUME(self.frame->joins == k_u16_max); + + // Outside parallel regions so can touch non-atomically. 
if constexpr (LF_COMPILER_EXCEPTIONS) { if (self.frame->exception_bit) [[unlikely]] { - std::ignore = extract_exception(self.frame); + self.rethrow_exception(); } } - return final_suspend_leading(self.frame); } - [[noreturn]] - constexpr void rethrow_exception(this join_awaitable self) { - std::rethrow_exception(extract_exception(self.frame)); + constexpr auto take_stack(this join_awaitable self) noexcept -> void { + stack_t &stack = get_tls_stack(); + LF_ASSUME(self.frame->stack_ckpt != stack.checkpoint()); + stack.acquire(std::as_const(self.frame->stack_ckpt)); } - constexpr void await_resume(this join_awaitable self) { - // We should have been reset - LF_ASSUME(self.frame->steals == 0); - LF_ASSUME(self.frame->joins == k_u16_max); + [[nodiscard]] + constexpr auto handle_cancel(this join_awaitable self) -> coro<> { + // Only need to take the stack if there were steals + if (self.frame->steals > 0) { + self.take_stack(); + } - // Outside parallel regions so can touch non-atomically. + // We always need to reset the connters as we modified + self.frame->reset_counters(); + + // Drop any exceptions in the now-cancelled task if constexpr (LF_COMPILER_EXCEPTIONS) { if (self.frame->exception_bit) [[unlikely]] { - self.rethrow_exception(); + std::ignore = extract_exception(self.frame); } } + + return final_suspend_leading(self.frame); + } + + [[noreturn]] + constexpr void rethrow_exception(this join_awaitable self) { + std::rethrow_exception(extract_exception(self.frame)); } }; From a9f1be566ae3f3aa6c120b5b8025ec34dae824cb Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 10:50:56 +0100 Subject: [PATCH 066/123] tmp --- benchmark/src/libfork_benchmark/common.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/src/libfork_benchmark/common.hpp b/benchmark/src/libfork_benchmark/common.hpp index 1750cc22..a36f93d8 100644 --- a/benchmark/src/libfork_benchmark/common.hpp +++ b/benchmark/src/libfork_benchmark/common.hpp @@ -12,7 +12,7 @@ 
struct incorrect_result : public std::runtime_error { using std::runtime_error::runtime_error; }; -inline constexpr unsigned bench_max_threads = 12; +inline constexpr unsigned bench_max_threads = 24; #define CHECK_RESULT(result, expected) \ do { \ From 3f5c9134d0cabe040ba8cc4c0c10a782f71caae5 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 10:57:04 +0100 Subject: [PATCH 067/123] not all threads --- benchmark/src/libfork_benchmark/common.hpp | 10 +++++++++- benchmark/src/libfork_benchmark/fib/libfork.cpp | 4 ++-- benchmark/src/libfork_benchmark/uts/libfork.cpp | 4 ++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/benchmark/src/libfork_benchmark/common.hpp b/benchmark/src/libfork_benchmark/common.hpp index a36f93d8..03a7e554 100644 --- a/benchmark/src/libfork_benchmark/common.hpp +++ b/benchmark/src/libfork_benchmark/common.hpp @@ -12,7 +12,15 @@ struct incorrect_result : public std::runtime_error { using std::runtime_error::runtime_error; }; -inline constexpr unsigned bench_max_threads = 24; +inline void bench_thread_args(benchmark::internal::Benchmark *bench, auto make_args) { + unsigned hw = std::thread::hardware_concurrency(); + for (unsigned t : {1U, 2U, 4U, 6U, 8U, 12U, 16U, 24U, 32U, 48U, 64U, 96U}) { + if (t > hw) { + return; + } + make_args(bench, t); + } +} #define CHECK_RESULT(result, expected) \ do { \ diff --git a/benchmark/src/libfork_benchmark/fib/libfork.cpp b/benchmark/src/libfork_benchmark/fib/libfork.cpp index 7c318858..3eaac185 100644 --- a/benchmark/src/libfork_benchmark/fib/libfork.cpp +++ b/benchmark/src/libfork_benchmark/fib/libfork.cpp @@ -89,9 +89,9 @@ BENCH_ALL(inline_scheduler, adapt_deque>>) BENCHMARK_TEMPLATE(run, __VA_ARGS__) \ ->Name(#mode "/libfork/fib/" #__VA_ARGS__) \ ->Apply([](benchmark::Benchmark *b) -> void { \ - for (unsigned t = 1; t <= bench_max_threads; ++t) { \ + bench_thread_args(b, [](benchmark::Benchmark *b, unsigned t) { \ b->Args({fib_##mode, static_cast(t)}); \ - } \ + }); \ }) \ 
->UseRealTime(); diff --git a/benchmark/src/libfork_benchmark/uts/libfork.cpp b/benchmark/src/libfork_benchmark/uts/libfork.cpp index af63a5db..59fd521b 100644 --- a/benchmark/src/libfork_benchmark/uts/libfork.cpp +++ b/benchmark/src/libfork_benchmark/uts/libfork.cpp @@ -90,9 +90,9 @@ void run(benchmark::State &state) { BENCHMARK_TEMPLATE(run, __VA_ARGS__) \ ->Name(#mode "/libfork/uts/" tree_name "/" #__VA_ARGS__) \ ->Apply([](benchmark::Benchmark *b) -> void { \ - for (unsigned t = 1; t <= bench_max_threads; ++t) { \ + bench_thread_args(b, [](benchmark::Benchmark *b, unsigned t) { \ b->Args({tree_id, static_cast(t)}); \ - } \ + }); \ }) \ ->UseRealTime(); From 0f86acc3d11864e63bc1eb51cbaa6816f37f19f5 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 10:58:17 +0100 Subject: [PATCH 068/123] don't use internal --- benchmark/src/libfork_benchmark/common.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/src/libfork_benchmark/common.hpp b/benchmark/src/libfork_benchmark/common.hpp index 03a7e554..0649b8cc 100644 --- a/benchmark/src/libfork_benchmark/common.hpp +++ b/benchmark/src/libfork_benchmark/common.hpp @@ -12,7 +12,7 @@ struct incorrect_result : public std::runtime_error { using std::runtime_error::runtime_error; }; -inline void bench_thread_args(benchmark::internal::Benchmark *bench, auto make_args) { +inline void bench_thread_args(benchmark::Benchmark *bench, auto make_args) { unsigned hw = std::thread::hardware_concurrency(); for (unsigned t : {1U, 2U, 4U, 6U, 8U, 12U, 16U, 24U, 32U, 48U, 64U, 96U}) { if (t > hw) { From 339c5f033561db337c2568a6da50748f270b3dbe Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 11:03:55 +0100 Subject: [PATCH 069/123] complexity --- benchmark/src/libfork_benchmark/fib/libfork.cpp | 2 ++ benchmark/src/libfork_benchmark/uts/libfork.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/benchmark/src/libfork_benchmark/fib/libfork.cpp b/benchmark/src/libfork_benchmark/fib/libfork.cpp 
index 3eaac185..73fcf322 100644 --- a/benchmark/src/libfork_benchmark/fib/libfork.cpp +++ b/benchmark/src/libfork_benchmark/fib/libfork.cpp @@ -41,6 +41,7 @@ void run(benchmark::State &state) { state.counters["n"] = static_cast(n); state.counters["p"] = static_cast(thread_count(state)); + state.SetComplexityN(static_cast(thread_count(state))); Sch scheduler = make_scheduler(state); @@ -93,6 +94,7 @@ BENCH_ALL(inline_scheduler, adapt_deque>>) b->Args({fib_##mode, static_cast(t)}); \ }); \ }) \ + ->Complexity([](benchmark::IterationCount n) -> double { return 1.0 / static_cast(n); }) \ ->UseRealTime(); #define BENCH_ALL_MT(...) BENCH_ONE_MT(test, __VA_ARGS__) BENCH_ONE_MT(base, __VA_ARGS__) diff --git a/benchmark/src/libfork_benchmark/uts/libfork.cpp b/benchmark/src/libfork_benchmark/uts/libfork.cpp index 59fd521b..bf53bb3d 100644 --- a/benchmark/src/libfork_benchmark/uts/libfork.cpp +++ b/benchmark/src/libfork_benchmark/uts/libfork.cpp @@ -69,6 +69,7 @@ void run(benchmark::State &state) { auto expected = expected_result(tree); state.counters["p"] = static_cast(thread_count(state)); + state.SetComplexityN(static_cast(thread_count(state))); Sch scheduler = make_scheduler(state); @@ -94,6 +95,7 @@ void run(benchmark::State &state) { b->Args({tree_id, static_cast(t)}); \ }); \ }) \ + ->Complexity([](benchmark::IterationCount n) -> double { return 1.0 / static_cast(n); }) \ ->UseRealTime(); #define BENCH_MT(...) \ From 87c992be2f744de563ef619faa97a393769892c3 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 13:32:10 +0100 Subject: [PATCH 070/123] Revert "cancel test" This reverts commit 30db539cfece1e9a0506dff40268f687ff64b288. 
--- src/core/frame.cxx | 9 +- test/src/cancel.cpp | 410 -------------------------------------------- 2 files changed, 1 insertion(+), 418 deletions(-) delete mode 100644 test/src/cancel.cpp diff --git a/src/core/frame.cxx b/src/core/frame.cxx index c4ff3714..ddff6627 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -10,16 +10,9 @@ import libfork.utils; namespace lf { // =================== Cancellation =================== // -export struct cancellation { +struct cancellation { cancellation *parent = nullptr; std::atomic stop = 0; - - constexpr void request_stop() noexcept { stop.store(1, std::memory_order_release); } - - [[nodiscard]] - constexpr auto stop_requested() const noexcept -> bool { - return stop.load(std::memory_order_acquire) != 0; - } }; // =================== Frame =================== // diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp deleted file mode 100644 index f52e03a6..00000000 --- a/test/src/cancel.cpp +++ /dev/null @@ -1,410 +0,0 @@ -#include -#include - -#include "libfork/__impl/exception.hpp" - -import std; - -import libfork; - -// ============================================================ -// Helpers -// ============================================================ - -namespace { - -using lf::cancellation; -using lf::env; -using lf::task; - -// ---- context aliases ---- - -using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; -using poly_inline_ctx = lf::derived_poly_context, lf::adapt_vector>; -using mono_busy_pool = lf::mono_busy_pool>; -using poly_busy_pool = lf::poly_busy_pool>; - -// ---- leaf tasks ---- - -template -auto noop_task(env) -> task { - co_return; -} - -template -auto counting_task(env, std::atomic *counter) -> task { - counter->fetch_add(1, std::memory_order_relaxed); - co_return; -} - -template -auto returning_task(env) -> task { - co_return 99; -} - -// Signals cancellation, then returns. 
-template -auto signal_cancel_task(env, cancellation *tok) -> task { - tok->request_stop(); - co_return; -} - -// ============================================================ -// Tasks for "pre-cancel: child task runs, grandchild is skipped" -// -// outer_with_cancel [cancel=nullptr] -// → scope::call(&tok, inner_with_cancel) -// inner_with_cancel [cancel=&tok] -// → scope::call(counting_task) ← await_transform checks inner's cancel -// → skipped when tok is stopped -// ============================================================ - -template -auto inner_with_cancel(env, std::atomic *grandchild_ran) -> task { - using S = lf::scope; - // Inherited cancel is checked here. If our cancel chain is stopped, - // counting_task is never created. - co_await S::call(counting_task, grandchild_ran); - co_await lf::join(); - co_return; -} - -template -auto outer_with_cancel(env, cancellation *tok, std::atomic *grandchild_ran) - -> task { - using S = lf::scope; - // Root's cancel=nullptr → await_transform succeeds, creates inner_with_cancel - // and binds it to tok. - co_await S::call(tok, inner_with_cancel, grandchild_ran); - co_await lf::join(); - co_return; -} - -// ============================================================ -// Tasks for "fork: child signals cancel, post-join code unreachable" -// -// fork_outer [cancel=nullptr] -// → scope::call(&tok, fork_signal_join) -// fork_signal_join [cancel=&tok] -// → scope::fork(signal_cancel_task) [cancel inherited=&tok] -// → co_await join() -// join: steals=0, is_cancelled()=true -// await_ready → false -// await_suspend: steals=0 special-case (bug-fix path) -// → handle_cancel → final_suspend cascade -// [post-join code unreachable] -// ============================================================ - -template -auto fork_signal_join(env, cancellation *tok, std::atomic *post_join_ran) - -> task { - using S = lf::scope; - - // Fork a child that sets tok.stop = 1. 
tok is not yet stopped, so the - // child is created (parent's await_transform sees is_cancelled()=false). - co_await S::fork(signal_cancel_task, tok); - - // Join: with the inline scheduler there are no steals (steals==0). - // tok is now stopped, so is_cancelled() returns true. - // await_ready returns false → await_suspend is called. - // The steals==0 special-case in await_suspend handles this correctly. - co_await lf::join(); - - // Should NOT be reached — cancellation cascades before here. - post_join_ran->fetch_add(1, std::memory_order_relaxed); - co_return; -} - -template -auto fork_outer(env, cancellation *tok, std::atomic *post_join_ran) -> task { - using S = lf::scope; - co_await S::call(tok, fork_signal_join, tok, post_join_ran); - co_await lf::join(); - co_return; -} - -// ============================================================ -// Tasks for "second fork skipped when token already stopped" -// -// fork_two_outer [cancel=nullptr] -// → scope::call(&tok, fork_two_children) -// fork_two_children [cancel=&tok] -// → fork(signal_cancel_task) ← sets tok, completes inline -// → fork(counting_task) ← await_transform: is_cancelled()=true → skipped -// → join() -// ============================================================ - -template -auto fork_two_children(env, cancellation *tok, std::atomic *second_ran) -> task { - using S = lf::scope; - - co_await S::fork(signal_cancel_task, tok); - // After inline child completes tok is stopped. - // This task's cancel = tok → next await_transform checks is_cancelled() → true → skipped. 
- co_await S::fork(counting_task, second_ran); - co_await lf::join(); - co_return; -} - -template -auto fork_two_outer(env, cancellation *tok, std::atomic *second_ran) -> task { - using S = lf::scope; - co_await S::call(tok, fork_two_children, tok, second_ran); - co_await lf::join(); - co_return; -} - -// ============================================================ -// Tasks for "return value is default-initialised when cancelled" -// -// The inner task cancels via a fork, then cascades before co_return 99. -// The outer task writes the (unset) return value to val, which stays 0. -// ============================================================ - -template -auto inner_returning(env, cancellation *tok) -> task { - using S = lf::scope; - co_await S::fork(signal_cancel_task, tok); - co_await lf::join(); // cascade happens here - co_return 99; // unreachable -} - -template -auto outer_returning(env, cancellation *tok) -> task { - using S = lf::scope; - int val = 0; - // Call inner with tok in its cancel chain; write result to val. - co_await S::call(tok, &val, inner_returning, tok); - co_await lf::join(); - co_return val; -} - -#if LF_COMPILER_EXCEPTIONS - -// ============================================================ -// Tasks for "exception in forked child that also signals cancel" -// -// The child throws AND signals cancellation. Because the parent frame is -// cancelled, handle_cancel() discards the stashed exception (std::ignore = -// extract_exception). The receiver must complete without throwing. 
-// ============================================================ - -template -auto throw_and_cancel(env, cancellation *tok) -> task { - tok->request_stop(); - LF_THROW(std::runtime_error{"intentional"}); - co_return; -} - -template -auto fork_throw_and_cancel(env, cancellation *tok) -> task { - using S = lf::scope; - co_await S::fork(throw_and_cancel, tok); - co_await lf::join(); - co_return; -} - -template -auto fork_throw_outer(env, cancellation *tok) -> task { - using S = lf::scope; - co_await S::call(tok, fork_throw_and_cancel, tok); - co_await lf::join(); - co_return; -} - -#endif // LF_COMPILER_EXCEPTIONS - -// ============================================================ -// Task for "call-based cancel: pre-stopped token, child skips its own work" -// ============================================================ - -template -auto call_pre_cancel_root(env, cancellation *tok, std::atomic *ran) -> task { - using S = lf::scope; - // tok is already stopped before this call. - // Root's cancel=nullptr → await_transform creates inner_with_cancel. - // inner_with_cancel inherits tok → its own await_transform skips counting_task. - co_await S::call(tok, inner_with_cancel, ran); - co_await lf::join(); - co_return; -} - -// ============================================================ -// Generic test runner -// ============================================================ - -template -void run_cancel_tests(Sch &scheduler) { - - using Ctx = lf::context_t; - - // ---------------------------------------------------------------- - // 1. 
No cancellation: normal execution still works - // ---------------------------------------------------------------- - SECTION("no cancel: noop task completes") { - auto recv = lf::schedule(scheduler, noop_task); - REQUIRE(recv.valid()); - std::move(recv).get(); - } - - SECTION("no cancel: value task returns correct value") { - auto recv = lf::schedule(scheduler, returning_task); - REQUIRE(recv.valid()); - REQUIRE(std::move(recv).get() == 99); - } - - // ---------------------------------------------------------------- - // 2. Pre-cancelled token: child runs but its own sub-children are skipped - // - // The cancel token is passed ONLY to the inner child (scope::call(&tok, - // inner_with_cancel)). The root's cancel chain is nullptr, so the root's - // await_transform succeeds. Inside inner_with_cancel, is_cancelled() - // returns true (its cancel=&tok, tok stopped), so counting_task is skipped. - // ---------------------------------------------------------------- - SECTION("pre-cancel: grandchild is skipped") { - cancellation tok; - tok.request_stop(); - std::atomic grandchild{0}; - - auto recv = lf::schedule(scheduler, outer_with_cancel, &tok, &grandchild); - REQUIRE(recv.valid()); - std::move(recv).get(); - REQUIRE(grandchild.load() == 0); - } - - // ---------------------------------------------------------------- - // 3. Token signalled from within a FORKED child, then join (steals=0 path) - // - // This exercises: - // (a) fork: child inline-completes before parent reaches join - // (b) join_awaitable::await_ready → false (steals=0, is_cancelled) - // (c) join_awaitable::await_suspend steals=0 special-case (bug fix) - // (d) handle_cancel → final_suspend cascade - // - // post_join_ran must stay 0: code after co_await join() is unreachable. 
- // ---------------------------------------------------------------- - SECTION("fork-cancel: post-join code unreachable after cancel (steals=0 path)") { - cancellation tok; - std::atomic post_join_ran{0}; - - auto recv = lf::schedule(scheduler, fork_outer, &tok, &post_join_ran); - REQUIRE(recv.valid()); - std::move(recv).get(); - REQUIRE(post_join_ran.load() == 0); - } - - // ---------------------------------------------------------------- - // 4. Second fork is skipped when token is already stopped at await_transform - // - // fork_two_children: first fork signals tok, second fork is skipped - // because the parent's await_transform checks is_cancelled() → true. - // ---------------------------------------------------------------- - SECTION("fork-cancel: second fork skipped when token already stopped") { - cancellation tok; - std::atomic second_ran{0}; - - auto recv = lf::schedule(scheduler, fork_two_outer, &tok, &second_ran); - REQUIRE(recv.valid()); - std::move(recv).get(); - REQUIRE(second_ran.load() == 0); - } - - // ---------------------------------------------------------------- - // 5. Return value is default-initialised when task is cancelled before - // co_return - // - // inner_returning cascades at the join (via fork+signal); outer_returning - // writes the (never-set) return value, which stays 0. - // ---------------------------------------------------------------- - SECTION("cancel before return: receiver holds default-initialised value") { - cancellation tok; - - auto recv = lf::schedule(scheduler, outer_returning, &tok); - REQUIRE(recv.valid()); - REQUIRE(std::move(recv).get() == 0); - } - -#if LF_COMPILER_EXCEPTIONS - // ---------------------------------------------------------------- - // 6. Exception in forked child that also signals cancel - // - // The child throws AND signals tok. When the parent's join cascades via - // handle_cancel(), it discards the stashed exception (std::ignore = - // extract_exception). 
The receiver must complete without throwing. - // ---------------------------------------------------------------- - SECTION("cancel cleans up stashed exception: receiver does not throw") { - cancellation tok; - - auto recv = lf::schedule(scheduler, fork_throw_outer, &tok); - REQUIRE(recv.valid()); - REQUIRE_NOTHROW(std::move(recv).get()); - } -#endif // LF_COMPILER_EXCEPTIONS -} - -} // namespace - -// ============================================================ -// Token unit tests (no scheduler required) -// ============================================================ - -TEST_CASE("cancellation token: initial state is not stopped", "[cancel]") { - cancellation tok; - REQUIRE_FALSE(tok.stop_requested()); -} - -TEST_CASE("cancellation token: request_stop sets stopped", "[cancel]") { - cancellation tok; - tok.request_stop(); - REQUIRE(tok.stop_requested()); -} - -TEST_CASE("cancellation token: request_stop is idempotent", "[cancel]") { - cancellation tok; - tok.request_stop(); - tok.request_stop(); - REQUIRE(tok.stop_requested()); -} - -TEST_CASE("cancellation token: chain — neither stopped", "[cancel]") { - cancellation parent; - cancellation child{.parent = &parent}; - REQUIRE_FALSE(parent.stop_requested()); - REQUIRE_FALSE(child.stop_requested()); -} - -TEST_CASE("cancellation token: chain — stopping child does not affect parent", "[cancel]") { - cancellation parent; - cancellation child{.parent = &parent}; - child.request_stop(); - REQUIRE(child.stop_requested()); - REQUIRE_FALSE(parent.stop_requested()); // parent unaffected -} - -TEST_CASE("cancellation token: chain — stopping parent does not affect child's own flag", "[cancel]") { - cancellation parent; - cancellation child{.parent = &parent}; - parent.request_stop(); - // stop_requested() only checks this node's flag: - REQUIRE(parent.stop_requested()); - REQUIRE_FALSE(child.stop_requested()); - // But is_cancelled() (on a frame that holds a chain through child → parent) - // would return true. 
That logic is tested indirectly via the scheduler tests. -} - -// ============================================================ -// Schedule-based tests -// ============================================================ - -TEMPLATE_TEST_CASE("Inline cancellation", "[cancel]", mono_inline_ctx, poly_inline_ctx) { - lf::inline_scheduler scheduler; - run_cancel_tests(scheduler); -} - -TEMPLATE_TEST_CASE("Busy-pool cancellation", "[cancel]", mono_busy_pool, poly_busy_pool) { - STATIC_REQUIRE(lf::scheduler); - for (std::size_t thr = 1; thr < 4; ++thr) { - TestType pool{thr}; - run_cancel_tests(pool); - } -} From 7e9e51569f9733a19d5111f65b2a3f0384df4ee9 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 13:34:26 +0100 Subject: [PATCH 071/123] structure for stop.cxx --- CMakeLists.txt | 1 + src/core/core.cxx | 1 + src/core/frame.cxx | 1 + src/core/stop.cxx | 14 ++++++++++++++ 4 files changed, 17 insertions(+) create mode 100644 src/core/stop.cxx diff --git a/CMakeLists.txt b/CMakeLists.txt index 3dbbfade..c26eaa3e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -84,6 +84,7 @@ target_sources(libfork_libfork src/core/execute.cxx src/core/receiver.cxx src/core/promise.cxx + src/core/stop.cxx # libfork.batteries src/batteries/batteries.cxx src/batteries/deque.cxx diff --git a/src/core/core.cxx b/src/core/core.cxx index b97552d9..120ced6c 100644 --- a/src/core/core.cxx +++ b/src/core/core.cxx @@ -22,3 +22,4 @@ export import :schedule; export import :root; export import :execute; export import :receiver; +export import :stop; diff --git a/src/core/frame.cxx b/src/core/frame.cxx index ddff6627..7ecd9c30 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -8,6 +8,7 @@ import std; import libfork.utils; namespace lf { + // =================== Cancellation =================== // struct cancellation { diff --git a/src/core/stop.cxx b/src/core/stop.cxx new file mode 100644 index 00000000..b43d109f --- /dev/null +++ b/src/core/stop.cxx @@ -0,0 +1,14 @@ +module; 
+#include "libfork/__impl/assume.hpp" +#include "libfork/__impl/compiler.hpp" +#include "libfork/__impl/exception.hpp" +#include "libfork/__impl/utils.hpp" +export module libfork.core:stop; + +import std; + +namespace lf { + +// tmp + +} From 474655518ca86c7526d7c879754b9272f89816b5 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 14:25:03 +0100 Subject: [PATCH 072/123] stop.cxx --- src/core/stop.cxx | 63 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/src/core/stop.cxx b/src/core/stop.cxx index b43d109f..55fd9e39 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -1,14 +1,63 @@ -module; -#include "libfork/__impl/assume.hpp" -#include "libfork/__impl/compiler.hpp" -#include "libfork/__impl/exception.hpp" -#include "libfork/__impl/utils.hpp" export module libfork.core:stop; import std; namespace lf { -// tmp +/** + * @brief An intrusively linked chain of stop sources. + */ +export class stop_source { + public: + constexpr stop_source() noexcept = default; -} + // Immovable + constexpr stop_source(const stop_source &) noexcept = delete; + constexpr stop_source(stop_source &&) noexcept = delete; + constexpr auto operator=(const stop_source &) noexcept -> stop_source & = delete; + constexpr auto operator=(stop_source &&) noexcept -> stop_source & = delete; + + /** + * @brief Request that this stop source (and all its children) stop. + */ + constexpr auto request_stop() noexcept -> void { m_stop.store(1, std::memory_order_release); } + + /** + * @brief Same as `request_stop`, but returns true if this is the first time stop has been requested. + */ + constexpr auto race_request_stop() noexcept -> bool { + return m_stop.exchange(1, std::memory_order_release) == 0; + } + + /** + * @brief Test if this stop source has been requested to stop. + * + * Note that this does not check parent stop sources, use `deep_stop_requested` for that. 
+ */ + [[nodiscard]] + constexpr auto stop_requested() const noexcept -> bool { + return m_stop.load(std::memory_order_acquire) == 1; + } + + /** + * @brief Test if any stop request has been made in the current chain. + * + * Safe to call with a null pointer, in which case it returns false. + */ + [[nodiscard]] + friend constexpr auto deep_stop_requested(stop_source *src) noexcept -> bool { + // TODO: Should exception trigger cancellation? + for (stop_source *ptr = src; ptr != nullptr; src = src->m_parent) { + if (ptr->stop_requested()) { + return true; + } + } + return false; + } + + private: + stop_source *m_parent = nullptr; + std::atomic m_stop = 0; +}; + +} // namespace lf From 0b20224f0583282c7e5f4111188d9e1c565d4e08 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 14:28:12 +0100 Subject: [PATCH 073/123] use stop.cxx --- src/core/frame.cxx | 22 ++++------------------ src/core/ops.cxx | 5 +++-- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/src/core/frame.cxx b/src/core/frame.cxx index 7ecd9c30..174bc8b1 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -7,16 +7,9 @@ import std; import libfork.utils; -namespace lf { - -// =================== Cancellation =================== // +import :stop; -struct cancellation { - cancellation *parent = nullptr; - std::atomic stop = 0; -}; - -// =================== Frame =================== // +namespace lf { // TODO: remove this and other exports export enum class category : std::uint8_t { @@ -40,7 +33,7 @@ struct frame_type : frame_base { uninitialized except; frame_type *parent; - cancellation *cancel; + stop_source *cancel; [[no_unique_address]] Checkpoint stack_ckpt; @@ -60,14 +53,7 @@ struct frame_type : frame_base { [[nodiscard]] constexpr auto is_cancelled() const noexcept -> bool { // TODO: Should exception trigger cancellation? 
- for (cancellation *ptr = cancel; ptr != nullptr; ptr = ptr->parent) { - // TODO: if users can't use cancellation outside of fork-join - // then this can be relaxed - if (ptr->stop.load(std::memory_order_acquire) == 1) { - return true; - } - } - return false; + return deep_stop_requested(cancel); } [[nodiscard]] diff --git a/src/core/ops.cxx b/src/core/ops.cxx index f5ee7d6c..0150a698 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -8,6 +8,7 @@ import libfork.utils; import :concepts_invocable; import :frame; +import :stop; namespace lf { @@ -24,7 +25,7 @@ struct maybe_ptr {}; template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] maybe_ptr<0, std::conditional_t> maybe_cancel; + [[no_unique_address]] maybe_ptr<0, std::conditional_t> maybe_cancel; [[no_unique_address]] maybe_ptr<1, R> maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; @@ -68,7 +69,7 @@ struct scope { template using fork_cancel_pkg = pkg; - using cancel_t = cancellation *; + using cancel_t = stop_source *; // TODO: a test that instanticates all of these From dea450964c3c21e7903b5d7719ee49b9495a06f2 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 14:37:45 +0100 Subject: [PATCH 074/123] better op names --- src/core/ops.cxx | 56 +++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 0150a698..71a49084 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -69,80 +69,78 @@ struct scope { template using fork_cancel_pkg = pkg; - using cancel_t = stop_source *; + using stop_t = stop_source *; - // TODO: a test that instanticates all of these + // TODO: a test that instantiates all of these public: // === Fork no-cancel === // + template Fn> + static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { + return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } template 
Fn> - static constexpr auto - fork(std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_pkg { + static constexpr auto fork_drop(Fn &&fn, Args &&...args) noexcept -> fork_pkg { return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_pkg { return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - template Fn> - static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - } // === Fork with-cancel === // + template Fn> + static constexpr auto + fork_with(stop_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } template Fn> static constexpr auto - fork(cancel_t ptr, std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + fork_with_drop(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto - fork(cancel_t ptr, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { + fork_with(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - template Fn> - static constexpr auto - fork(cancel_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - } // === Call no-cancel === // + template Fn> + static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } template Fn> - static constexpr auto - call(std::nullptr_t, Fn &&fn, Args &&...args) 
noexcept -> call_pkg { + static constexpr auto call_drop(Fn &&fn, Args &&...args) noexcept -> call_pkg { return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - template Fn> - static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - } // === Call with-cancel === // - // TODO: explicitly = delte overloads with cancel ptr = std::nullptr_t to avoid mistakes? + // TODO: explicitly = delete overloads with cancel ptr = std::nullptr_t to avoid mistakes? + template Fn> + static constexpr auto + call_with(stop_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { + return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + } template Fn> static constexpr auto - call(cancel_t ptr, std::nullptr_t, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { + call_with_drop(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto - call(cancel_t ptr, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { + call_with(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - template Fn> - static constexpr auto - call(cancel_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; - } }; // TODO: do we want join a member of scope? 
From 571e2ca53126dd304d6c14acb521003d42b11f2a Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 14:42:50 +0100 Subject: [PATCH 075/123] todo --- src/core/stop.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/src/core/stop.cxx b/src/core/stop.cxx index 55fd9e39..be461dc4 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -46,7 +46,6 @@ export class stop_source { */ [[nodiscard]] friend constexpr auto deep_stop_requested(stop_source *src) noexcept -> bool { - // TODO: Should exception trigger cancellation? for (stop_source *ptr = src; ptr != nullptr; src = src->m_parent) { if (ptr->stop_requested()) { return true; From e679bf408b62c7037f1fb48a98a72def829332d6 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 14:43:26 +0100 Subject: [PATCH 076/123] cancel tests --- test/src/cancel.cpp | 65 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 test/src/cancel.cpp diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp new file mode 100644 index 00000000..972fb925 --- /dev/null +++ b/test/src/cancel.cpp @@ -0,0 +1,65 @@ +#include + +#include +#include + +#include "libfork/__impl/exception.hpp" + +import std; + +import libfork; + +namespace { + +template +auto add_one(lf::env, std::atomic *count) -> lf::task { + co_return count->fetch_add(1); +} + +template +void simple_tests(Sch &scheduler) { + SECTION("void") { + + std::atomic count = 0; + + auto recv = schedule(scheduler, add_one>, &count); + REQUIRE(recv.valid()); + std::move(recv).get(); + } + + // #if LF_COMPILER_EXCEPTIONS + // SECTION("throwing") { + // auto recv = schedule(scheduler, throwing_function>); + // REQUIRE(recv.valid()); + // REQUIRE_THROWS_AS(std::move(recv).get(), std::runtime_error); + // } + // #endif +} + +using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; +using poly_inline_ctx = lf::derived_poly_context, lf::adapt_vector>; + +} // namespace + +TEMPLATE_TEST_CASE("Innline cancel", "[cancel]", mono_inline_ctx, 
poly_inline_ctx) { + lf::inline_scheduler sch{}; + simple_tests(sch); +} + +// namespace { +// +// using mono_busy_thread_pool = lf::mono_busy_pool>; +// using poly_busy_thread_pool = lf::poly_busy_pool>; +// +// } // namespace +// +// TEMPLATE_TEST_CASE("Busy schedule", "[schedule]", mono_busy_thread_pool, poly_busy_thread_pool) { +// +// STATIC_REQUIRE(lf::scheduler); +// +// for (std::size_t thr = 1; thr < 4; ++thr) { +// TestType scheduler{thr}; +// simple_tests(scheduler); +// } +// } +// mport libfork; From 777e0e5b0435af6b05e1a96e6176183daf308013 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:16:40 +0100 Subject: [PATCH 077/123] root package propagates exception --- src/core/root.cxx | 14 ++++++++++---- src/core/schedule.cxx | 2 ++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/core/root.cxx b/src/core/root.cxx index 0863c41d..001bf373 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -99,10 +99,9 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t LF_ASSUME(child != nullptr); - // TODO: cancellation - + // Propagate parent/cancel info to child child->frame.parent = root; - child->frame.cancel = nullptr; + child->frame.cancel = root->cancel; LF_ASSUME(child->frame.kind == category::call); @@ -119,6 +118,8 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t // - Normal return // - Exception // - Cancellation + // + // We return any exception stashed unconditionally if constexpr (LF_COMPILER_EXCEPTIONS) { if (root->exception_bit) { @@ -128,10 +129,15 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t } cleanup: - // Now to that which we would otherwise do at a final suspend. + // Now do that which we would otherwise do at a final suspend. // Notify the receiver that the task is done. 
recv->m_ready.test_and_set(); recv->m_ready.notify_one(); + + LF_ASSUME(root->steals == 0); + LF_ASSUME(root->joins == k_u16_max); + LF_ASSUME(root->exception_bit == 0); + co_return; } diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index 2deb8f62..56899caf 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -74,6 +74,8 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_tframe.kind = category::root; + task.promise->frame.parent = nullptr; // No parent for root tasks + task.promise->frame.cancel = nullptr; // No cancellation for root tasks LF_TRY { sch.post(sched_handle{key(), &task.promise->frame}); From fe1edf1d0a495ebfabcdb054d0ac381f3f586cff Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:26:38 +0100 Subject: [PATCH 078/123] stop plumbed --- src/core/receiver.cxx | 4 ++++ src/core/schedule.cxx | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index 2a83961b..f3b54fd5 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -8,6 +8,8 @@ import std; import libfork.utils; +import :stop; + namespace lf { export struct broken_receiver_error final : libfork_exception { @@ -26,6 +28,8 @@ struct receiver_state { std::conditional_t, empty, T> m_return_value; std::exception_ptr m_exception; std::atomic_flag m_ready; + + stop_source m_stop; }; export template diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index 56899caf..f4a18a4f 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -75,7 +75,7 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_tframe.kind = category::root; task.promise->frame.parent = nullptr; // No parent for root tasks - task.promise->frame.cancel = nullptr; // No cancellation for root tasks + task.promise->frame.cancel = &state->m_stop; // No cancellation for root tasks LF_TRY { sch.post(sched_handle{key(), &task.promise->frame}); From ecb1c241581e5cc372dd22a70b88e6c1827d97db Mon Sep 17 
00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:29:09 +0100 Subject: [PATCH 079/123] fix infinite loop --- src/core/stop.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/stop.cxx b/src/core/stop.cxx index be461dc4..e37d0f37 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -46,7 +46,7 @@ export class stop_source { */ [[nodiscard]] friend constexpr auto deep_stop_requested(stop_source *src) noexcept -> bool { - for (stop_source *ptr = src; ptr != nullptr; src = src->m_parent) { + for (stop_source *ptr = src; ptr != nullptr; ptr = ptr->m_parent) { if (ptr->stop_requested()) { return true; } From c846cbc358692f699531b87b460f6fe965d1e5e9 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:38:26 +0100 Subject: [PATCH 080/123] access the stop source of the reciver --- src/core/receiver.cxx | 11 +++++++++++ src/core/schedule.cxx | 6 ++++-- src/core/stop.cxx | 4 ++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index f3b54fd5..bbf0e1b5 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -51,6 +51,17 @@ class receiver { return m_state != nullptr; } + /** + * @brief Get a reference to the underlying stop_source. 
+ */ + [[nodiscard]] + constexpr auto stop_source() noexcept -> stop_source & { + if (!valid()) { + LF_THROW(broken_receiver_error{}); + } + return m_state->m_stop; + } + [[nodiscard]] constexpr auto ready() const -> bool { if (!valid()) { diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index f4a18a4f..0aab065a 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -73,9 +73,11 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_tframe.kind = category::root; - task.promise->frame.parent = nullptr; // No parent for root tasks - task.promise->frame.cancel = &state->m_stop; // No cancellation for root tasks + task.promise->frame.parent = nullptr; + task.promise->frame.cancel = &state->m_stop; LF_TRY { sch.post(sched_handle{key(), &task.promise->frame}); diff --git a/src/core/stop.cxx b/src/core/stop.cxx index e37d0f37..066dd243 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -45,8 +45,8 @@ export class stop_source { * Safe to call with a null pointer, in which case it returns false. */ [[nodiscard]] - friend constexpr auto deep_stop_requested(stop_source *src) noexcept -> bool { - for (stop_source *ptr = src; ptr != nullptr; ptr = ptr->m_parent) { + friend constexpr auto deep_stop_requested(stop_source const *src) noexcept -> bool { + for (stop_source const *ptr = src; ptr != nullptr; ptr = ptr->m_parent) { if (ptr->stop_requested()) { return true; } From b84d13da916fbbd66cc180c8971df85715e9d1b9 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:40:39 +0100 Subject: [PATCH 081/123] todo --- src/core/promise.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index cbabfd24..df57fdb0 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -532,6 +532,8 @@ struct mixin_frame { } if constexpr (Cancel) { + // TODO: need some kind of API to launch an unstoppable task? + // currently this prevents the cancel ptr from being null. 
child_promise->frame.cancel = not_null(pkg.maybe_cancel.ptr); } else { child_promise->frame.cancel = self.frame.cancel; From e1e1ad80147559a37a15ee0ad999d0b707f0268e Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:45:48 +0100 Subject: [PATCH 082/123] allow early cancellation of root --- src/core/root.cxx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/core/root.cxx b/src/core/root.cxx index 001bf373..878e9de5 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -89,6 +89,12 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t promise_type *child = nullptr; + if (root->is_cancelled()) { + // The root task was cancelled before it even started, we can skip + // straight to cleanup. + goto cleanup; + } + LF_TRY { // Potentially throwing child = get(key(), ctx_invoke_t{}(std::move(fn), std::move(args)...)); From 37e763bd96e431506d66165a27d64791171b957d Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 15:50:42 +0100 Subject: [PATCH 083/123] tmp cancel --- test/src/cancel.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 972fb925..f92ac20f 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -16,6 +16,14 @@ auto add_one(lf::env, std::atomic *count) -> lf::taskfetch_add(1); } +template +auto test_cancel(lf::env) -> lf::task { + + lf::stop_source src; + + co_return; +} + template void simple_tests(Sch &scheduler) { SECTION("void") { From 632952e6f95f7c0bbbfee504468d57e75d48350d Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 16:03:16 +0100 Subject: [PATCH 084/123] co_await scope() --- .../src/libfork_benchmark/fib/libfork.cpp | 10 ++++++---- .../src/libfork_benchmark/uts/libfork.cpp | 10 ++++++---- src/core/ops.cxx | 20 ++++++++++++++++--- src/core/promise.cxx | 9 +++++++++ 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/benchmark/src/libfork_benchmark/fib/libfork.cpp b/benchmark/src/libfork_benchmark/fib/libfork.cpp index 
73fcf322..69b6960a 100644 --- a/benchmark/src/libfork_benchmark/fib/libfork.cpp +++ b/benchmark/src/libfork_benchmark/fib/libfork.cpp @@ -22,10 +22,10 @@ struct fib { std::int64_t lhs = 0; std::int64_t rhs = 0; - using scope = lf::scope; + auto sc = co_await lf::scope(); - co_await scope::fork(&rhs, fib{}, n - 2); - co_await scope::call(&lhs, fib{}, n - 1); + co_await sc.fork(&rhs, fib{}, n - 2); + co_await sc.call(&lhs, fib{}, n - 1); co_await lf::join(); @@ -94,7 +94,9 @@ BENCH_ALL(inline_scheduler, adapt_deque>>) b->Args({fib_##mode, static_cast(t)}); \ }); \ }) \ - ->Complexity([](benchmark::IterationCount n) -> double { return 1.0 / static_cast(n); }) \ + ->Complexity([](benchmark::IterationCount n) -> double { \ + return 1.0 / static_cast(n); \ + }) \ ->UseRealTime(); #define BENCH_ALL_MT(...) BENCH_ONE_MT(test, __VA_ARGS__) BENCH_ONE_MT(base, __VA_ARGS__) diff --git a/benchmark/src/libfork_benchmark/uts/libfork.cpp b/benchmark/src/libfork_benchmark/uts/libfork.cpp index bf53bb3d..59906937 100644 --- a/benchmark/src/libfork_benchmark/uts/libfork.cpp +++ b/benchmark/src/libfork_benchmark/uts/libfork.cpp @@ -37,12 +37,12 @@ struct uts_fn { rng_spawn(parent->state.state, cs[i].child.state.state, static_cast(i)); } - using scope = lf::scope; + auto sc = co_await lf::scope(); if (i + 1 == static_cast(num_children)) { - co_await scope::call(&cs[i].res, uts_fn{}, depth + 1, &cs[i].child); + co_await sc.call(&cs[i].res, uts_fn{}, depth + 1, &cs[i].child); } else { - co_await scope::fork(&cs[i].res, uts_fn{}, depth + 1, &cs[i].child); + co_await sc.fork(&cs[i].res, uts_fn{}, depth + 1, &cs[i].child); } } @@ -95,7 +95,9 @@ void run(benchmark::State &state) { b->Args({tree_id, static_cast(t)}); \ }); \ }) \ - ->Complexity([](benchmark::IterationCount n) -> double { return 1.0 / static_cast(n); }) \ + ->Complexity([](benchmark::IterationCount n) -> double { \ + return 1.0 / static_cast(n); \ + }) \ ->UseRealTime(); #define BENCH_MT(...) 
\ diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 71a49084..d95c121e 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -51,8 +51,8 @@ constexpr auto fwd_fn(auto &&fn) noexcept -> Fn { } } -export template -struct scope { +template +struct scope_ops { private: // Use && for fn/args for zero move/copy + noexcept // TODO: Is it better to stores values for some types i.e. empty @@ -74,6 +74,13 @@ struct scope { // TODO: a test that instantiates all of these public: + // Immovable + scope_ops() noexcept = default; + scope_ops(const scope_ops &) = delete; + scope_ops(scope_ops &&) = delete; + scope_ops &operator=(const scope_ops &) = delete; + scope_ops &operator=(scope_ops &&) = delete; + // === Fork no-cancel === // template Fn> @@ -143,10 +150,17 @@ struct scope { } }; -// TODO: do we want join a member of scope? +struct scope_type {}; + +export [[nodiscard("You should immediately co_await this!")]] +constexpr auto scope() noexcept -> scope_type { + return {}; +} // =============== Join =============== // +// TODO: do we want join a member of scope? 
+ struct join_type {}; export [[nodiscard("You should immediately co_await this!")]] diff --git a/src/core/promise.cxx b/src/core/promise.cxx index df57fdb0..af0a50d9 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -467,6 +467,13 @@ struct join_awaitable { } }; +// ==== Scope awaitable ==== // + +template +struct scope_awaitable : std::suspend_never { + static constexpr auto await_resume() -> scope_ops { return {}; } +}; + // =============== Frame mixin =============== // template @@ -557,6 +564,8 @@ struct mixin_frame { return {.frame = &self.frame}; } + static constexpr auto await_transform(scope_type) noexcept -> scope_awaitable { return {}; } + constexpr static auto initial_suspend() noexcept -> std::suspend_always { return {}; } constexpr static auto final_suspend() noexcept -> final_awaitable { return {}; } From 60b1635f2fb326fac9d364637c5777f17464c727 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 16:07:50 +0100 Subject: [PATCH 085/123] no export stop source --- src/core/stop.cxx | 2 +- test/src/cancel.cpp | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/stop.cxx b/src/core/stop.cxx index 066dd243..a25a81d8 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -7,7 +7,7 @@ namespace lf { /** * @brief An intrusively linked chain of stop sources. 
*/ -export class stop_source { +class stop_source { public: constexpr stop_source() noexcept = default; diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index f92ac20f..eb264f28 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -16,13 +16,13 @@ auto add_one(lf::env, std::atomic *count) -> lf::taskfetch_add(1); } -template -auto test_cancel(lf::env) -> lf::task { - - lf::stop_source src; - - co_return; -} +// template +// auto test_cancel(lf::env) -> lf::task { +// +// lf::stop_source src; +// +// co_return; +// } template void simple_tests(Sch &scheduler) { From 75ef6ca2863a556ee8d125a5761c27b1ded31a7a Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 16:41:18 +0100 Subject: [PATCH 086/123] explicitly default initialize --- src/core/receiver.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index bbf0e1b5..4c40be83 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -25,7 +25,7 @@ struct receiver_state { struct empty {}; [[no_unique_address]] - std::conditional_t, empty, T> m_return_value; + std::conditional_t, empty, T> m_return_value{}; std::exception_ptr m_exception; std::atomic_flag m_ready; From 258f325111d45d7bdad0edd93888c71fc149c84e Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 16:48:42 +0100 Subject: [PATCH 087/123] basic cancel test --- src/core/promise.cxx | 11 +++++++++++ src/core/stop.cxx | 8 ++++++++ test/src/cancel.cpp | 45 +++++++++++++++++++++++++++----------------- 3 files changed, 47 insertions(+), 17 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index af0a50d9..29106959 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -474,6 +474,13 @@ struct scope_awaitable : std::suspend_never { static constexpr auto await_resume() -> scope_ops { return {}; } }; +// ==== Stop awaitable ==== // + +struct stop_awaitable : std::suspend_never { + stop_source *cancel; + constexpr auto await_resume(this 
stop_awaitable self) -> stop_source { return stop_source{self.cancel}; } +}; + // =============== Frame mixin =============== // template @@ -566,6 +573,10 @@ struct mixin_frame { static constexpr auto await_transform(scope_type) noexcept -> scope_awaitable { return {}; } + constexpr auto await_transform(this auto const &self, stop_type) noexcept -> stop_awaitable { + return {.cancel = self.frame.cancel}; + } + constexpr static auto initial_suspend() noexcept -> std::suspend_always { return {}; } constexpr static auto final_suspend() noexcept -> final_awaitable { return {}; } diff --git a/src/core/stop.cxx b/src/core/stop.cxx index a25a81d8..aa962549 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -4,11 +4,19 @@ import std; namespace lf { +struct stop_type {}; // Tag type + +export [[nodiscard("You should immediately co_await this!")]] +constexpr auto child_stop_source() noexcept -> stop_type { + return {}; +} + /** * @brief An intrusively linked chain of stop sources. */ class stop_source { public: + constexpr explicit stop_source(stop_source *parent) noexcept : m_parent(parent) {} constexpr stop_source() noexcept = default; // Immovable diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index eb264f28..2a1a19ae 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -11,28 +11,39 @@ import libfork; namespace { +struct add_one { + template + static auto operator()(lf::env, std::atomic &count) -> lf::task { + co_return count.fetch_add(1); + } +}; + template -auto add_one(lf::env, std::atomic *count) -> lf::task { - co_return count->fetch_add(1); -} +auto test_cancel(lf::env) -> lf::task { -// template -// auto test_cancel(lf::env) -> lf::task { -// -// lf::stop_source src; -// -// co_return; -// } + // Test that a task that a call doesn't run if cancelled -template -void simple_tests(Sch &scheduler) { - SECTION("void") { + std::atomic count = 0; - std::atomic count = 0; + auto stop_src = co_await lf::child_stop_source(); + auto sc = co_await 
lf::scope(); - auto recv = schedule(scheduler, add_one>, &count); + stop_src.request_stop(); + + co_await sc.call_with_drop(&stop_src, add_one{}, count); + co_await lf::join(); + + co_return count.load() == 0; +} + +auto test_no_join() {} + +template +void tests(Sch &scheduler) { + SECTION("Canceled is not run") { + auto recv = schedule(scheduler, test_cancel>); REQUIRE(recv.valid()); - std::move(recv).get(); + REQUIRE(std::move(recv).get()); } // #if LF_COMPILER_EXCEPTIONS @@ -51,7 +62,7 @@ using poly_inline_ctx = lf::derived_poly_context, lf::adap TEMPLATE_TEST_CASE("Innline cancel", "[cancel]", mono_inline_ctx, poly_inline_ctx) { lf::inline_scheduler sch{}; - simple_tests(sch); + tests(sch); } // namespace { From fab63ffa60a3002c5cb14623f3049970726f6ae0 Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 16:52:08 +0100 Subject: [PATCH 088/123] busy versions --- test/src/cancel.cpp | 41 ++++++++++++++++------------------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 2a1a19ae..839477e8 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -45,14 +45,6 @@ void tests(Sch &scheduler) { REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - - // #if LF_COMPILER_EXCEPTIONS - // SECTION("throwing") { - // auto recv = schedule(scheduler, throwing_function>); - // REQUIRE(recv.valid()); - // REQUIRE_THROWS_AS(std::move(recv).get(), std::runtime_error); - // } - // #endif } using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; @@ -65,20 +57,19 @@ TEMPLATE_TEST_CASE("Innline cancel", "[cancel]", mono_inline_ctx, poly_inline_ct tests(sch); } -// namespace { -// -// using mono_busy_thread_pool = lf::mono_busy_pool>; -// using poly_busy_thread_pool = lf::poly_busy_pool>; -// -// } // namespace -// -// TEMPLATE_TEST_CASE("Busy schedule", "[schedule]", mono_busy_thread_pool, poly_busy_thread_pool) { -// -// STATIC_REQUIRE(lf::scheduler); -// -// for (std::size_t thr = 1; thr < 4; 
++thr) { -// TestType scheduler{thr}; -// simple_tests(scheduler); -// } -// } -// mport libfork; +namespace { + +using mono_busy_thread_pool = lf::mono_busy_pool>; +using poly_busy_thread_pool = lf::poly_busy_pool>; + +} // namespace + +TEMPLATE_TEST_CASE("Busy cancel", "[schedule]", mono_busy_thread_pool, poly_busy_thread_pool) { + + STATIC_REQUIRE(lf::scheduler); + + for (std::size_t thr = 1; thr < 4; ++thr) { + TestType scheduler{thr}; + tests(scheduler); + } +} From 1216892b9ca947f933dd78d4d0ecfd71052bfb5c Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 17:14:06 +0100 Subject: [PATCH 089/123] tests --- test/src/cancel.cpp | 453 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 442 insertions(+), 11 deletions(-) diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 839477e8..6ef6d9ed 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -9,42 +9,473 @@ import std; import libfork; +// Exhaustive tests for all cancellation paths in promise.cxx. +// +// Cancellation check-points in promise.cxx: +// +// A. awaitable::await_suspend (Cancel=true): +// child->is_cancelled() → child not spawned (fork_with/call_with) +// +// B. awaitable::await_suspend (Cancel=false): +// parent.promise().frame.is_cancelled() → child not spawned (fork/call) +// +// C. final_suspend_full / final_suspend_trailing: +// parent->is_cancelled() after winning join race → exception dropped, +// iterative ancestor cleanup (exercises concurrent/stolen path) +// +// D. join_awaitable::await_ready: +// is_cancelled() forces suspension even when steals==0 +// +// E. join_awaitable::await_suspend: +// is_cancelled() after winning join race → handle_cancel() +// +// F. 
handle_cancel (exception_bit set on cancelled frame): +// exception dropped, not propagated to caller + namespace { -struct add_one { +// ============================================================ +// Basic helper tasks +// ============================================================ + +// Returns the old count (i.e. before incrementing) +struct count_up { template static auto operator()(lf::env, std::atomic &count) -> lf::task { co_return count.fetch_add(1); } }; +struct count_up_void { + template + static auto operator()(lf::env, std::atomic &count) -> lf::task { + count.fetch_add(1); + co_return; + } +}; + +// ============================================================ +// A. Cancel=true: child-specific cancellation (call_with / fork_with) +// +// Exercises awaitable::await_suspend's Cancel=true branch. +// The check is on the CHILD frame's cancel token. +// ============================================================ + +// Pre-cancelled call_with_drop: child not run template -auto test_cancel(lf::env) -> lf::task { +auto test_call_with_drop_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + auto stop = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + stop.request_stop(); + co_await sc.call_with_drop(&stop, count_up_void{}, count); + co_await lf::join(); + co_return count.load() == 0; +} - // Test that a task that a call doesn't run if cancelled +// Pre-cancelled call_with with return value: return address not written +template +auto test_call_with_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + int result = 99; + auto stop = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + stop.request_stop(); + co_await sc.call_with(&stop, &result, count_up{}, count); + co_await lf::join(); + co_return result == 99 && count.load() == 0; +} +// Pre-cancelled fork_with_drop: child not run +template +auto test_fork_with_drop_cancelled(lf::env) -> lf::task { std::atomic count = 0; + auto stop = co_await 
lf::child_stop_source(); + auto sc = co_await lf::scope(); + stop.request_stop(); + co_await sc.fork_with_drop(&stop, count_up_void{}, count); + co_await lf::join(); + co_return count.load() == 0; +} - auto stop_src = co_await lf::child_stop_source(); +// Pre-cancelled fork_with with return value: return address not written +template +auto test_fork_with_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + int result = 99; + auto stop = co_await lf::child_stop_source(); auto sc = co_await lf::scope(); + stop.request_stop(); + co_await sc.fork_with(&stop, &result, count_up{}, count); + co_await lf::join(); + co_return result == 99 && count.load() == 0; +} - stop_src.request_stop(); +// Positive: call_with NOT cancelled - child runs +template +auto test_call_with_not_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + int result = 0; + auto stop = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.call_with(&stop, &result, count_up{}, count); + co_await lf::join(); + co_return result == 0 && count.load() == 1; +} - co_await sc.call_with_drop(&stop_src, add_one{}, count); +// Positive: fork_with NOT cancelled - child runs +template +auto test_fork_with_not_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + int result = 0; + auto stop = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.fork_with(&stop, &result, count_up{}, count); co_await lf::join(); + co_return result == 0 && count.load() == 1; +} +// Multiple fork_with_drop: all pre-cancelled, none run +template +auto test_multiple_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + auto stop = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + stop.request_stop(); + co_await sc.fork_with_drop(&stop, count_up_void{}, count); + co_await sc.fork_with_drop(&stop, count_up_void{}, count); + co_await sc.fork_with_drop(&stop, count_up_void{}, count); + co_await lf::join(); co_return count.load() == 0; } -auto 
test_no_join() {} +// Mixed: some children have a cancelled token, others don't. +// Only the non-cancelled children should run. +template +auto test_mixed_cancel(lf::env) -> lf::task { + std::atomic count = 0; + auto stop_run = co_await lf::child_stop_source(); + auto stop_skip = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + stop_skip.request_stop(); + co_await sc.fork_with_drop(&stop_run, count_up_void{}, count); // runs + co_await sc.fork_with_drop(&stop_skip, count_up_void{}, count); // skipped + co_await sc.fork_with_drop(&stop_run, count_up_void{}, count); // runs + co_await lf::join(); + co_return count.load() == 2; +} + +// ============================================================ +// B. Cancel=false: parent frame cancellation propagation +// +// Exercises awaitable::await_suspend's Cancel=false branch. +// The check is on the PARENT frame's is_cancelled(). +// +// Strategy: use call_with to give an inner task a specific stop source as +// its frame.cancel. The inner task receives that pointer as an argument, +// calls request_stop() on it (making its own is_cancelled() true), then +// tries to launch sub-tasks via the no-cancel (Cancel=false) API. +// +// The sub-tasks are skipped because parent.is_cancelled() is true. +// At the subsequent join, handle_cancel (path E/D) fires, cleans up the +// inner task, and resumes the outer task normally. +// +// Outer task's stop chain does NOT include the inner task's cs, so the +// outer task completes normally and returns the count comparison. 
+// ============================================================ + +// Inner task: cancels its own stop source, then tries call_drop (Cancel=false) +struct inner_call_after_self_cancel { + template + static auto + operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + my_cancel->request_stop(); // Make this frame's is_cancelled() return true + auto sc = co_await lf::scope(); + // Cancel=false: parent (this frame) is_cancelled() → child not spawned (path B) + co_await sc.call_drop(count_up_void{}, count); + // Cancel=false: same check for fork (path B) + co_await sc.fork_drop(count_up_void{}, count); + // Paths D+E: join sees is_cancelled(), fires handle_cancel, outer task resumes + co_await lf::join(); + count.fetch_add(100); // must not be reached + } +}; + +// test_call_parent_cancel: outer wraps inner via call_with so inner.frame.cancel = &cs. +// inner cancels cs and verifies both call_drop and fork_drop are skipped. +template +auto test_call_parent_cancel(lf::env) -> lf::task { + std::atomic count = 0; + auto cs = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.call_with_drop(&cs, inner_call_after_self_cancel{}, &cs, count); + co_await lf::join(); + co_return count.load() == 0; +} + +// ============================================================ +// C/D/E. Concurrent cancellation: final_suspend + join interaction +// +// A forked child cancels the parent's stop source, then the parent task +// arrives at join. The join detects cancellation (paths D+E) and calls +// handle_cancel. With multiple threads (busy pool), the cancel may also +// be observed in final_suspend_full (path C). +// ============================================================ + +// A child task that cancels a stop source then runs normally. +// Template on CS to avoid naming lf::stop_source directly. 
+struct cancel_cs { + template + static auto operator()(lf::env, CS *cs, std::atomic &count) -> lf::task { + count.fetch_add(1); + cs->request_stop(); + co_return; + } +}; + +// Outer task: forked children cancel the frame's stop source, then join +// detects cancel (D+E). The outer is NOT the cancelled task - it just +// verifies the inner completes cleanly. +struct inner_fork_then_cancel_at_join { + template + static auto + operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + auto sc = co_await lf::scope(); + co_await sc.fork_drop(cancel_cs{}, my_cancel, count); + co_await lf::join(); // is_cancelled after child cancels cs → handle_cancel + count.fetch_add(100); // must not be reached + } +}; + +template +auto test_fork_cancel_at_join(lf::env) -> lf::task { + std::atomic count = 0; + auto cs = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.call_with_drop(&cs, inner_fork_then_cancel_at_join{}, &cs, count); + co_await lf::join(); + co_return count.load() == 1; // cancel_cs ran exactly once +} + +// ============================================================ +// F. Exception + cancellation interaction +// +// When a frame is cancelled at join time and the frame has an exception +// stashed, handle_cancel drops (not propagates) the exception. +// ============================================================ + +#if LF_COMPILER_EXCEPTIONS + +// A task that throws unconditionally +struct just_throw { + template + static auto operator()(lf::env) -> lf::task { + throw std::runtime_error("test exception"); + co_return; + } +}; + +// Inner task: forks just_throw, then rethrows at join. +// Used to verify exceptions propagate when no cancellation. 
+struct inner_forks_throwing { + template + static auto operator()(lf::env) -> lf::task { + auto sc = co_await lf::scope(); + co_await sc.fork_drop(just_throw{}); + co_await lf::join(); // not cancelled → exception_bit=1 → rethrow (path await_resume) + co_return; // not reached + } +}; + +// Test F1: exception propagates through join and all the way to recv.get() +// when the task is NOT cancelled. +template +auto test_exception_propagates(lf::env) -> lf::task { + auto cs = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.call_with_drop(&cs, inner_forks_throwing{}); + co_await lf::join(); +} + +// A child task that cancels a stop source AND throws. +// Exception stashes in its parent (inner_cancel_and_throw's frame). +struct cancel_cs_and_throw { + template + static auto operator()(lf::env, CS *cs, std::atomic &count) -> lf::task { + count.fetch_add(1); // Confirm this task ran + cs->request_stop(); // Cancel parent's stop source + throw std::runtime_error("should be dropped"); + co_return; + } +}; + +// Inner task: +// 1. fork cancel_cs_and_throw → child cancels my_cancel AND stashes exception +// in this frame (not outer's). +// 2. At join: is_cancelled AND exception_bit → handle_cancel drops exception (path F), +// then final_suspend_leading resumes outer. +// Outer sees no exception and count==1. +struct inner_cancel_and_throw { + template + static auto + operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + auto sc = co_await lf::scope(); + co_await sc.fork_drop(cancel_cs_and_throw{}, my_cancel, count); + co_await lf::join(); // cancelled + exception → handle_cancel drops exception + count.fetch_add(100); // must not be reached + } +}; + +// Test F2: exception stashed in a cancelled frame is silently dropped. +// recv.get() does NOT throw; cancel_cs_and_throw ran (count==1). 
+template +auto test_exception_dropped_when_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + auto cs = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.call_with_drop(&cs, inner_cancel_and_throw{}, &cs, count); + co_await lf::join(); // outer is NOT cancelled, no exception reaches here + co_return count.load() == 1; +} + +// Test F3: combined - verify that a non-cancellation exception still propagates +// when a sibling child cancelled the frame BUT the throwing child ran first +// (i.e., the exception stash/drop is frame-local, not task-global). +// +// inner_throws_first_then_cancel: +// fork throw_child → exception stashed in this frame +// fork cancel_child → cancels my_cancel +// join → cancelled + exception → exception dropped +struct just_throw_and_count { + template + static auto operator()(lf::env, std::atomic &count) -> lf::task { + count.fetch_add(1); + throw std::runtime_error("sibling exception"); + co_return; + } +}; + +struct inner_sibling_throws_and_cancel { + template + static auto + operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + auto sc = co_await lf::scope(); + co_await sc.fork_drop(just_throw_and_count{}, count); + co_await sc.fork_drop(cancel_cs{}, my_cancel, count); + co_await lf::join(); // cancelled; any exceptions dropped + count.fetch_add(100); // must not be reached + } +}; + +template +auto test_sibling_exception_dropped_when_cancelled(lf::env) -> lf::task { + std::atomic count = 0; + auto cs = co_await lf::child_stop_source(); + auto sc = co_await lf::scope(); + co_await sc.call_with_drop(&cs, inner_sibling_throws_and_cancel{}, &cs, count); + co_await lf::join(); // outer is NOT cancelled, no exception + // just_throw_and_count and cancel_cs both ran → count >= 2 + co_return count.load() >= 2 && count.load() < 100; +} + +#endif // LF_COMPILER_EXCEPTIONS + +// ============================================================ +// Run all tests against a given scheduler +// 
============================================================ template void tests(Sch &scheduler) { - SECTION("Canceled is not run") { - auto recv = schedule(scheduler, test_cancel>); + using Ctx = lf::context_t; + + // A. Cancel=true (child-specific token) + + SECTION("call_with_drop: pre-cancelled child is not run") { + auto recv = schedule(scheduler, test_call_with_drop_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("call_with: pre-cancelled child is not run, return address not written") { + auto recv = schedule(scheduler, test_call_with_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("fork_with_drop: pre-cancelled child is not run") { + auto recv = schedule(scheduler, test_fork_with_drop_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("fork_with: pre-cancelled child is not run, return address not written") { + auto recv = schedule(scheduler, test_fork_with_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("call_with: positive - not cancelled, child runs and writes result") { + auto recv = schedule(scheduler, test_call_with_not_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("fork_with: positive - not cancelled, child runs and writes result") { + auto recv = schedule(scheduler, test_fork_with_not_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("multiple fork_with_drop: all pre-cancelled, none run") { + auto recv = schedule(scheduler, test_multiple_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("fork_with_drop: mixed tokens - only non-cancelled children run") { + auto recv = schedule(scheduler, test_mixed_cancel); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + // B/D/E. 
Cancel=false (parent frame propagation) + join cancel handling + + SECTION("call_drop/fork_drop (Cancel=false): skipped when parent frame is cancelled; " + "join fires handle_cancel") { + auto recv = schedule(scheduler, test_call_parent_cancel); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("fork child cancels parent stop source; join detects cancel via handle_cancel") { + auto recv = schedule(scheduler, test_fork_cancel_at_join); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + +#if LF_COMPILER_EXCEPTIONS + + // F. Exception + cancellation + + SECTION("exception propagates through join when frame is NOT cancelled") { + auto recv = schedule(scheduler, test_exception_propagates); + REQUIRE(recv.valid()); + REQUIRE_THROWS_AS(std::move(recv).get(), std::runtime_error); + } + + SECTION("exception in cancelled frame is dropped by handle_cancel; recv.get() does not throw") { + auto recv = schedule(scheduler, test_exception_dropped_when_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } + + SECTION("sibling exception dropped when another sibling cancels the frame") { + auto recv = schedule(scheduler, test_sibling_exception_dropped_when_cancelled); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + +#endif // LF_COMPILER_EXCEPTIONS } using mono_inline_ctx = lf::mono_context, lf::adapt_vector>; @@ -52,7 +483,7 @@ using poly_inline_ctx = lf::derived_poly_context, lf::adap } // namespace -TEMPLATE_TEST_CASE("Innline cancel", "[cancel]", mono_inline_ctx, poly_inline_ctx) { +TEMPLATE_TEST_CASE("Inline cancel", "[cancel]", mono_inline_ctx, poly_inline_ctx) { lf::inline_scheduler sch{}; tests(sch); } @@ -64,7 +495,7 @@ using poly_busy_thread_pool = lf::poly_busy_pool>; } // namespace -TEMPLATE_TEST_CASE("Busy cancel", "[schedule]", mono_busy_thread_pool, poly_busy_thread_pool) { +TEMPLATE_TEST_CASE("Busy cancel", "[cancel]", mono_busy_thread_pool, poly_busy_thread_pool) { STATIC_REQUIRE(lf::scheduler); 
From 3b9bd7a491a26742ccf89a92e8dfde4a10d98c4c Mon Sep 17 00:00:00 2001 From: Conor Date: Fri, 17 Apr 2026 19:15:36 +0100 Subject: [PATCH 090/123] TMP no bind --- src/core/schedule.cxx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index 0aab065a..3d836bcc 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -2,6 +2,7 @@ module; #include "libfork/__impl/assume.hpp" #include "libfork/__impl/compiler.hpp" #include "libfork/__impl/exception.hpp" +#include export module libfork.core:schedule; import std; @@ -77,7 +78,7 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_tframe.kind = category::root; task.promise->frame.parent = nullptr; - task.promise->frame.cancel = &state->m_stop; + task.promise->frame.cancel = nullptr; LF_TRY { sch.post(sched_handle{key(), &task.promise->frame}); From 7f9df1d279552e4a9d0ef1a5a47385c07458e454 Mon Sep 17 00:00:00 2001 From: Conor Date: Sat, 18 Apr 2026 09:49:53 +0100 Subject: [PATCH 091/123] clean ups --- src/core/receiver.cxx | 4 ++-- test/src/cancel.cpp | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index 4c40be83..c30dd563 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -39,11 +39,11 @@ class receiver { public: constexpr receiver(key_t, std::shared_ptr &&state) : m_state(std::move(state)) {} - constexpr receiver(receiver &&) noexcept = default; - constexpr auto operator=(receiver &&) noexcept -> receiver & = default; // Move only + constexpr receiver(receiver &&) noexcept = default; constexpr receiver(const receiver &) = delete; + constexpr auto operator=(receiver &&) noexcept -> receiver & = default; constexpr auto operator=(const receiver &) -> receiver & = delete; [[nodiscard]] diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 6ef6d9ed..20225fd4 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -1,5 +1,3 @@ -#include - #include 
#include @@ -386,6 +384,7 @@ auto test_sibling_exception_dropped_when_cancelled(lf::env) -> lf::task template void tests(Sch &scheduler) { + using Ctx = lf::context_t; // A. Cancel=true (child-specific token) From d10808f0119754f1bc5e06b0f2ed619b66eb036c Mon Sep 17 00:00:00 2001 From: Conor Date: Sat, 18 Apr 2026 10:40:26 +0100 Subject: [PATCH 092/123] cancel notes --- src/core/cancel.md | 53 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 src/core/cancel.md diff --git a/src/core/cancel.md b/src/core/cancel.md new file mode 100644 index 00000000..abc75ab4 --- /dev/null +++ b/src/core/cancel.md @@ -0,0 +1,53 @@ +# Cancel notes + +Goals: + +- Symmetry between schedule and fork/call cancellation binding +- Allocator aware schedule (using shared pointer std:: function) +- Customize the construction of receiver +- Default schedule should be non-cancellable (bind nullptr) +- Join should be a member function of the scope +- Cancel scope instead of separate source+scope + +## Task 1 - cancel scope + +```cpp +auto example() -> task { + + auto sc = co_await child_scope(); + + co_await sc.fork(fn1, 0); + co_await sc.call(fn2, sc.token()); + + co_await sc.join(); +} +``` + +The result of `sc.token()` should be `stop_token` a lightweight wrapper around +a pointer to a stop source that has the stop_requested and other member +functions. + +You can convert the current `stop_source` to a simple internal-only struct and +convert uses of `stop_source*` to use `stop_token`. + +Make `join()` a member of both the regular and child scope's via a shared base +class. + +## Task 2 - Schedule API + +The receiver class and state should have a cancellable template parameter. + +The receiver state should have public constructors which forwards arguments for +in-place construction of the return value. The rest of the members should +become private. 
+ +The API of `schedule` should be something like: + +```cpp + requires decay_invocable_to +auto schedule(std::shared_ptr> recv_state, Fn && fn, Args&&... args...) +``` + +This allows users to customize the allocation if desired. A convenience +overload (which delegated to above) should exist, which just allocates via +`make_shared`. It should default to non-cancellable. From e7212c09ed05ee31921f1cc9c72f3bd9dc663c38 Mon Sep 17 00:00:00 2001 From: Conor Date: Sat, 18 Apr 2026 10:40:31 +0100 Subject: [PATCH 093/123] Revert "TMP no bind" This reverts commit 3b9bd7a491a26742ccf89a92e8dfde4a10d98c4c. --- src/core/schedule.cxx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index 3d836bcc..0aab065a 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -2,7 +2,6 @@ module; #include "libfork/__impl/assume.hpp" #include "libfork/__impl/compiler.hpp" #include "libfork/__impl/exception.hpp" -#include export module libfork.core:schedule; import std; @@ -78,7 +77,7 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_tframe.kind = category::root; task.promise->frame.parent = nullptr; - task.promise->frame.cancel = nullptr; + task.promise->frame.cancel = &state->m_stop; LF_TRY { sch.post(sched_handle{key(), &task.promise->frame}); From 2d6d98b738678c98b7ff9a97003cff4c2046c340 Mon Sep 17 00:00:00 2001 From: Conor Date: Sat, 18 Apr 2026 11:01:18 +0100 Subject: [PATCH 094/123] move external to benchmark --- benchmark/CMakeLists.txt | 10 +++++++--- {external => benchmark/external}/uts/CMakeLists.txt | 0 .../external}/uts/include/uts/rng/brg_sha1.h | 0 .../external}/uts/include/uts/rng/brg_types.h | 0 .../external}/uts/include/uts/rng/rng.h | 0 {external => benchmark/external}/uts/include/uts/uts.h | 0 .../external}/uts/src/rng/brg_endian.h | 0 .../external}/uts/src/rng/brg_sha1.c | 0 {external => benchmark/external}/uts/src/uts.c | 0 9 files changed, 7 insertions(+), 3 deletions(-) rename 
{external => benchmark/external}/uts/CMakeLists.txt (100%) rename {external => benchmark/external}/uts/include/uts/rng/brg_sha1.h (100%) rename {external => benchmark/external}/uts/include/uts/rng/brg_types.h (100%) rename {external => benchmark/external}/uts/include/uts/rng/rng.h (100%) rename {external => benchmark/external}/uts/include/uts/uts.h (100%) rename {external => benchmark/external}/uts/src/rng/brg_endian.h (100%) rename {external => benchmark/external}/uts/src/rng/brg_sha1.c (100%) rename {external => benchmark/external}/uts/src/uts.c (100%) diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt index e887edf4..6a0f6bcf 100644 --- a/benchmark/CMakeLists.txt +++ b/benchmark/CMakeLists.txt @@ -18,9 +18,8 @@ target_link_libraries(libfork_benchmark ) # Common components + target_sources(libfork_benchmark - PRIVATE - src/libfork_benchmark/uts/uts.cpp PRIVATE FILE_SET HEADERS FILES src/libfork_benchmark/common.hpp @@ -30,8 +29,13 @@ target_sources(libfork_benchmark src ) +target_sources(libfork_benchmark + PRIVATE + src/libfork_benchmark/uts/uts.cpp +) + # C lib for UTS -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../external/uts external/uts) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/external/uts external/uts) target_link_libraries(libfork_benchmark PRIVATE uts_c) diff --git a/external/uts/CMakeLists.txt b/benchmark/external/uts/CMakeLists.txt similarity index 100% rename from external/uts/CMakeLists.txt rename to benchmark/external/uts/CMakeLists.txt diff --git a/external/uts/include/uts/rng/brg_sha1.h b/benchmark/external/uts/include/uts/rng/brg_sha1.h similarity index 100% rename from external/uts/include/uts/rng/brg_sha1.h rename to benchmark/external/uts/include/uts/rng/brg_sha1.h diff --git a/external/uts/include/uts/rng/brg_types.h b/benchmark/external/uts/include/uts/rng/brg_types.h similarity index 100% rename from external/uts/include/uts/rng/brg_types.h rename to benchmark/external/uts/include/uts/rng/brg_types.h diff --git 
a/external/uts/include/uts/rng/rng.h b/benchmark/external/uts/include/uts/rng/rng.h similarity index 100% rename from external/uts/include/uts/rng/rng.h rename to benchmark/external/uts/include/uts/rng/rng.h diff --git a/external/uts/include/uts/uts.h b/benchmark/external/uts/include/uts/uts.h similarity index 100% rename from external/uts/include/uts/uts.h rename to benchmark/external/uts/include/uts/uts.h diff --git a/external/uts/src/rng/brg_endian.h b/benchmark/external/uts/src/rng/brg_endian.h similarity index 100% rename from external/uts/src/rng/brg_endian.h rename to benchmark/external/uts/src/rng/brg_endian.h diff --git a/external/uts/src/rng/brg_sha1.c b/benchmark/external/uts/src/rng/brg_sha1.c similarity index 100% rename from external/uts/src/rng/brg_sha1.c rename to benchmark/external/uts/src/rng/brg_sha1.c diff --git a/external/uts/src/uts.c b/benchmark/external/uts/src/uts.c similarity index 100% rename from external/uts/src/uts.c rename to benchmark/external/uts/src/uts.c From 82a53120982076f0e482bb56c9501bcbacd72673 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 12:37:47 +0100 Subject: [PATCH 095/123] stop token --- src/core/stop.cxx | 91 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 24 deletions(-) diff --git a/src/core/stop.cxx b/src/core/stop.cxx index aa962549..ab0d96f4 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -2,29 +2,82 @@ export module libfork.core:stop; import std; -namespace lf { - -struct stop_type {}; // Tag type +import libfork.utils; -export [[nodiscard("You should immediately co_await this!")]] -constexpr auto child_stop_source() noexcept -> stop_type { - return {}; -} +namespace lf { /** - * @brief An intrusively linked chain of stop sources. + * @brief Similar to a linked-list of std::stop_sorce but with an embedded stop_state. 
*/ -class stop_source { +export class stop_source { public: - constexpr explicit stop_source(stop_source *parent) noexcept : m_parent(parent) {} + /** + * @brief Lightweight public handle to a stop_source chain. + * + * A stop_token is a non-owning pointer-sized wrapper around a stop_source. + */ + class stop_token { + public: + /** + * @brief Construct a null (non-cancellable) token. + */ + constexpr stop_token() noexcept = default; + + /** + * @brief Returns true if a stop source is associated (cancellation is possible). + */ + [[nodiscard]] + constexpr auto stop_possible() const noexcept -> bool { + return m_src != nullptr; + } + + /** + * @brief Returns true if any stop source in the ancestor chain has been stopped. + * + * A null token always returns false. + */ + [[nodiscard]] + constexpr auto stop_requested() const noexcept -> bool { + return deep_stop_requested(m_src); + } + + private: + friend class stop_source; + + explicit constexpr stop_token(stop_source const *src) noexcept : m_src(src) {} + + stop_source const *m_src = nullptr; + }; + + /** + * @brief Construct a root stop source with no parent. + */ constexpr stop_source() noexcept = default; + /** + * @brief Construct a stop source chained onto the given parent token. + */ + constexpr explicit stop_source(stop_token parent) noexcept : m_parent(parent.m_src) {} + // Immovable constexpr stop_source(const stop_source &) noexcept = delete; constexpr stop_source(stop_source &&) noexcept = delete; constexpr auto operator=(const stop_source &) noexcept -> stop_source & = delete; constexpr auto operator=(stop_source &&) noexcept -> stop_source & = delete; + /** + * @brief Get a handle to this stop source. + */ + constexpr auto token() const noexcept -> stop_token { return stop_token{this}; } + + /** + * @brief Returns true if any stop source in the ancestor chain has been stopped. 
+ */ + [[nodiscard]] + constexpr auto stop_requested() const noexcept -> bool { + return deep_stop_requested(this); + } + /** * @brief Request that this stop source (and all its children) stop. */ @@ -33,20 +86,12 @@ class stop_source { /** * @brief Same as `request_stop`, but returns true if this is the first time stop has been requested. */ + [[nodiscard("You can use request_stop() if you don't need the return value")]] constexpr auto race_request_stop() noexcept -> bool { return m_stop.exchange(1, std::memory_order_release) == 0; } - /** - * @brief Test if this stop source has been requested to stop. - * - * Note that this does not check parent stop sources, use `deep_stop_requested` for that. - */ - [[nodiscard]] - constexpr auto stop_requested() const noexcept -> bool { - return m_stop.load(std::memory_order_acquire) == 1; - } - + private: /** * @brief Test if any stop request has been made in the current chain. * @@ -55,16 +100,14 @@ class stop_source { [[nodiscard]] friend constexpr auto deep_stop_requested(stop_source const *src) noexcept -> bool { for (stop_source const *ptr = src; ptr != nullptr; ptr = ptr->m_parent) { - if (ptr->stop_requested()) { + if (ptr->m_stop.load(std::memory_order_acquire) == 1) { return true; } } return false; } - private: - stop_source *m_parent = nullptr; + stop_source const *m_parent = nullptr; std::atomic m_stop = 0; }; - } // namespace lf From 1c42326fa45ce26d7674bcf64d243a5373ed2da9 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 12:38:30 +0100 Subject: [PATCH 096/123] use stop token --- src/core/frame.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/frame.cxx b/src/core/frame.cxx index 174bc8b1..75d323ae 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -33,7 +33,7 @@ struct frame_type : frame_base { uninitialized except; frame_type *parent; - stop_source *cancel; + stop_source::stop_token cancel; [[no_unique_address]] Checkpoint stack_ckpt; @@ -53,7 +53,7 @@ struct 
frame_type : frame_base { [[nodiscard]] constexpr auto is_cancelled() const noexcept -> bool { // TODO: Should exception trigger cancellation? - return deep_stop_requested(cancel); + return cancel.stop_requested(); } [[nodiscard]] From 57bedecbbf0a3577ab9280e630d2b054ae8608dd Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 12:52:02 +0100 Subject: [PATCH 097/123] split ops --- src/core/ops.cxx | 185 ++++++++++++++++++++++++++++------------------- 1 file changed, 112 insertions(+), 73 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index d95c121e..e876d297 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -12,21 +12,16 @@ import :stop; namespace lf { -// Integer is just to make the types different -template -struct maybe_ptr { - T *ptr; -}; - -template -struct maybe_ptr {}; +// Placeholder types for absent optional fields. +struct no_cnl_t {}; +struct no_ret_t {}; // clang-format off -template +template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] maybe_ptr<0, std::conditional_t> maybe_cancel; - [[no_unique_address]] maybe_ptr<1, R> maybe_ret_adr; + [[no_unique_address]] std::conditional_t maybe_cancel; + [[no_unique_address]] std::conditional_t, no_ret_t, R *> maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; }; @@ -34,10 +29,7 @@ struct [[nodiscard("You should immediately co_await this!")]] pkg { // clang-format on /** - * @brief Forward the function member of a pkg correctly - * - * The Fn member should be an l/r value reference, r-value reference need an - * explicit move to be forwarded correctly. + * @brief Forward the function member of a pkg correctly. 
*/ template constexpr auto fwd_fn(auto &&fn) noexcept -> Fn { @@ -51,41 +43,55 @@ constexpr auto fwd_fn(auto &&fn) noexcept -> Fn { } } +// =============== Join =============== // + +struct join_type {}; + +export [[nodiscard("You should immediately co_await this!")]] +constexpr auto join() noexcept -> join_type { + return {}; +} + +// =============== Scope base =============== // + +/** + * @brief Base class shared by scope_ops and child_scope_ops. + * + * Provides a member `join()` so that `co_await sc.join()` works on any scope type. + */ +struct scope_base { + [[nodiscard("You should immediately co_await this!")]] + static constexpr auto join() noexcept -> join_type { + return {}; + } +}; + +// =============== Scope ops (no embedded stop source) =============== // + template -struct scope_ops { +struct scope_ops : scope_base { private: - // Use && for fn/args for zero move/copy + noexcept - // TODO: Is it better to stores values for some types i.e. empty - template using call_pkg = pkg; template using fork_pkg = pkg; - template - using call_cancel_pkg = pkg; - - template - using fork_cancel_pkg = pkg; - - using stop_t = stop_source *; - - // TODO: a test that instantiates all of these - public: - // Immovable + // default constructible scope_ops() noexcept = default; + + // Immovable scope_ops(const scope_ops &) = delete; scope_ops(scope_ops &&) = delete; - scope_ops &operator=(const scope_ops &) = delete; - scope_ops &operator=(scope_ops &&) = delete; + auto operator=(const scope_ops &) -> scope_ops & = delete; + auto operator=(scope_ops &&) -> scope_ops & = delete; - // === Fork no-cancel === // + // === Fork === // template Fn> static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto fork_drop(Fn &&fn, Args &&...args) noexcept -> fork_pkg { 
@@ -96,75 +102,108 @@ struct scope_ops { return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - // === Fork with-cancel === // + // === Call === // template Fn> - static constexpr auto - fork_with(stop_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> - static constexpr auto - fork_with_drop(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + static constexpr auto call_drop(Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> - static constexpr auto - fork_with(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> fork_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + static constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } +}; - // === Call no-cancel === // +// ==== Scope awaitable ==== // + +template +struct scope_awaitable : std::suspend_never { + static constexpr auto await_resume() -> scope_ops { return {}; } +}; + +struct scope_type {}; + +export [[nodiscard("You should immediately co_await this!")]] +constexpr auto scope() noexcept -> scope_type { + return {}; +} + +// =============== Child scope ops (with embedded stop source) =============== // + +/** + * @brief A scope that is a stop_source. 
+ */ +template +struct child_scope_ops : scope_base, stop_source { + private: + template + using call_pkg = pkg; + + template + using fork_pkg = pkg; + + public: + /** + * @brief Construct the scope, chaining its stop source onto the parent's token. + */ + explicit constexpr child_scope_ops(stop_source::stop_token parent) noexcept : stop_source(parent) {} + + // Immovable (stop_source base is immovable) + child_scope_ops(const child_scope_ops &) = delete; + child_scope_ops(child_scope_ops &&) = delete; + auto operator=(const child_scope_ops &) -> child_scope_ops & = delete; + auto operator=(child_scope_ops &&) -> child_scope_ops & = delete; + + // === Fork (binds this scope's stop source as child cancel) === // template Fn> - static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { + return {.maybe_cancel = token(), .maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> - static constexpr auto call_drop(Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + constexpr auto fork_drop(Fn &&fn, Args &&...args) noexcept -> fork_pkg { + return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> - static constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_pkg { + return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - // === Call with-cancel === // - - // TODO: explicitly = delete overloads with cancel ptr = std::nullptr_t to avoid mistakes? 
+ // === Call (binds this scope's stop source as child cancel) === // template Fn> - static constexpr auto - call_with(stop_t ptr, R *ret, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {ret}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_cancel = token(), .maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> - static constexpr auto - call_with_drop(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + constexpr auto call_drop(Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> - static constexpr auto - call_with(stop_t ptr, Fn &&fn, Args &&...args) noexcept -> call_cancel_pkg { - return {.maybe_cancel = {ptr}, .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { + return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } }; -struct scope_type {}; - -export [[nodiscard("You should immediately co_await this!")]] -constexpr auto scope() noexcept -> scope_type { - return {}; -} +// =============== child_scope_awaitable =============== // -// =============== Join =============== // +template +struct child_scope_awaitable : std::suspend_never { + stop_source::stop_token parent_cancel; -// TODO: do we want join a member of scope? 
+ constexpr auto await_resume(this child_scope_awaitable self) -> child_scope_ops { + return child_scope_ops{self.parent_cancel}; + } +}; -struct join_type {}; +struct child_scope_type {}; export [[nodiscard("You should immediately co_await this!")]] -constexpr auto join() noexcept -> join_type { +constexpr auto child_scope() noexcept -> child_scope_type { return {}; } From 827246962674249962370079681788ee534504fe Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 12:52:26 +0100 Subject: [PATCH 098/123] markdown (delete before merge) --- src/core/cancel.md | 63 +++++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/src/core/cancel.md b/src/core/cancel.md index abc75ab4..ce00335f 100644 --- a/src/core/cancel.md +++ b/src/core/cancel.md @@ -2,14 +2,14 @@ Goals: -- Symmetry between schedule and fork/call cancellation binding -- Allocator aware schedule (using shared pointer std:: function) -- Customize the construction of receiver -- Default schedule should be non-cancellable (bind nullptr) -- Join should be a member function of the scope -- Cancel scope instead of separate source+scope +- Symmetry between schedule and fork/call cancellation binding ✓ +- Allocator aware schedule (using shared pointer std:: function) ✓ +- Customize the construction of receiver ✓ +- Default schedule should be non-cancellable (bind nullptr) ✓ +- Join should be a member function of the scope ✓ +- Cancel scope instead of separate source+scope ✓ -## Task 1 - cancel scope +## Task 1 - cancel scope ✓ ```cpp auto example() -> task { @@ -23,31 +23,42 @@ auto example() -> task { } ``` -The result of `sc.token()` should be `stop_token` a lightweight wrapper around -a pointer to a stop source that has the stop_requested and other member -functions. +`child_scope()` returns a `child_scope_ops` that: +- Owns a `stop_source` chained onto the parent frame's cancel token. 
+- All `fork`/`call` operations automatically bind the scope's stop source + as the child's cancel source (Cancel=true path). +- `.token()` returns a `stop_token` wrapping the scope's stop source. +- `.join()` is available via the shared `scope_base` base class + (same as calling `co_await lf::join()`). -You can convert the current `stop_source` to a simple internal-only struct and -convert uses of `stop_source*` to use `stop_token`. +`stop_source` is now internal-only (not exported). The public API is +`stop_token` — a lightweight pointer-sized wrapper that exposes +`stop_requested()`, `request_stop()`, and `race_request_stop()`. -Make `join()` a member of both the regular and child scope's via a shared base -class. +`scope_ops` (obtained via `co_await lf::scope()`) also inherits `scope_base` +and therefore also exposes `.join()`. For explicit cancel binding from a +regular scope, `fork_with(token, ...)` / `call_with(token, ...)` accept a +`stop_token`. -## Task 2 - Schedule API +## Task 2 - Schedule API ✓ -The receiver class and state should have a cancellable template parameter. +`receiver_state` has: +- Public default constructor + forwarding constructors for in-place T construction. +- All other members private, with accessor methods used by `root_pkg` and `schedule`. +- A `stop_source` member only when `Cancellable=true`. +- `get_stop_token()` (requires `Cancellable=true`) returns a `stop_token`. -The receiver state should have public constructors which forwards arguments for -in-place construction of the return value. The rest of the members should -become private. +`receiver` exposes: +- `token()` (requires `Cancellable=true`) for external cancellation. -The API of `schedule` should be something like: +`schedule` has two overloads: ```cpp - requires decay_invocable_to -auto schedule(std::shared_ptr> recv_state, Fn && fn, Args&&... args...) -``` +// Primary: caller supplies a pre-allocated (possibly custom-allocated) state. 
+auto schedule(Sch&&, shared_ptr>, Fn&&, Args&&...) + -> receiver; -This allows users to customize the allocation if desired. A convenience -overload (which delegated to above) should exist, which just allocates via -`make_shared`. It should default to non-cancellable. +// Convenience: allocates via make_shared, non-cancellable by default. +auto schedule(Sch&&, Fn&&, Args&&...) + -> receiver; // receiver +``` From 5ae91dfd3a086c4d86e316c9ed6da9233882f0c3 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 12:54:09 +0100 Subject: [PATCH 099/123] fix promise --- src/core/promise.cxx | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 29106959..6338d335 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -12,6 +12,7 @@ import libfork.utils; import :concepts_context; import :concepts_invocable; import :frame; +import :stop; import :task; import :thread_locals; import :ops; @@ -467,20 +468,6 @@ struct join_awaitable { } }; -// ==== Scope awaitable ==== // - -template -struct scope_awaitable : std::suspend_never { - static constexpr auto await_resume() -> scope_ops { return {}; } -}; - -// ==== Stop awaitable ==== // - -struct stop_awaitable : std::suspend_never { - stop_source *cancel; - constexpr auto await_resume(this stop_awaitable self) -> stop_source { return stop_source{self.cancel}; } -}; - // =============== Frame mixin =============== // template @@ -538,7 +525,7 @@ struct mixin_frame { // TODO: tests for null return path if constexpr (!std::is_void_v) { - child_promise->return_address = not_null(pkg.maybe_ret_adr.ptr); + child_promise->return_address = not_null(pkg.maybe_ret_adr); } else if constexpr (!std::is_void_v) { // Set child's return address to null to inhibit the return // TODO: add test for this @@ -547,8 +534,8 @@ struct mixin_frame { if constexpr (Cancel) { // TODO: need some kind of API to launch an unstoppable task? 
- // currently this prevents the cancel ptr from being null. - child_promise->frame.cancel = not_null(pkg.maybe_cancel.ptr); + LF_ASSUME(pkg.maybe_cancel.stop_possible()); + child_promise->frame.cancel = pkg.maybe_cancel; } else { child_promise->frame.cancel = self.frame.cancel; } @@ -573,8 +560,9 @@ struct mixin_frame { static constexpr auto await_transform(scope_type) noexcept -> scope_awaitable { return {}; } - constexpr auto await_transform(this auto const &self, stop_type) noexcept -> stop_awaitable { - return {.cancel = self.frame.cancel}; + constexpr auto + await_transform(this auto const &self, child_scope_type) noexcept -> child_scope_awaitable { + return {.parent_cancel = self.frame.cancel}; } constexpr static auto initial_suspend() noexcept -> std::suspend_always { return {}; } From d2ff9b3c1a9574d91d54a933d72370cfe6657c8e Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 13:00:15 +0100 Subject: [PATCH 100/123] fix tests --- test/src/cancel.cpp | 322 +++++++++++++++++--------------------------- 1 file changed, 124 insertions(+), 198 deletions(-) diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 20225fd4..2a68879c 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -12,10 +12,10 @@ import libfork; // Cancellation check-points in promise.cxx: // // A. awaitable::await_suspend (Cancel=true): -// child->is_cancelled() → child not spawned (fork_with/call_with) +// child->is_cancelled() → child not spawned (fork/call via child_scope_ops) // // B. awaitable::await_suspend (Cancel=false): -// parent.promise().frame.is_cancelled() → child not spawned (fork/call) +// parent.promise().frame.is_cancelled() → child not spawned (fork/call via scope_ops) // // C. final_suspend_full / final_suspend_trailing: // parent->is_cancelled() after winning join race → exception dropped, @@ -36,7 +36,6 @@ namespace { // Basic helper tasks // ============================================================ -// Returns the old count (i.e. 
before incrementing) struct count_up { template static auto operator()(lf::env, std::atomic &count) -> lf::task { @@ -53,194 +52,157 @@ struct count_up_void { }; // ============================================================ -// A. Cancel=true: child-specific cancellation (call_with / fork_with) +// A. Cancel=true: child-specific cancellation via child_scope_ops. // -// Exercises awaitable::await_suspend's Cancel=true branch. -// The check is on the CHILD frame's cancel token. +// child_scope_ops binds its stop_source as Cancel=true on every fork/call. +// Calling sc.request_stop() before launching exercises +// awaitable::await_suspend's Cancel=true branch. // ============================================================ -// Pre-cancelled call_with_drop: child not run template -auto test_call_with_drop_cancelled(lf::env) -> lf::task { +auto test_call_drop_cancelled(lf::env) -> lf::task { std::atomic count = 0; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - stop.request_stop(); - co_await sc.call_with_drop(&stop, count_up_void{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + sc.request_stop(); + co_await sc.call_drop(count_up_void{}, count); + co_await sc.join(); co_return count.load() == 0; } -// Pre-cancelled call_with with return value: return address not written template -auto test_call_with_cancelled(lf::env) -> lf::task { +auto test_call_cancelled(lf::env) -> lf::task { std::atomic count = 0; int result = 99; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - stop.request_stop(); - co_await sc.call_with(&stop, &result, count_up{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + sc.request_stop(); + co_await sc.call(&result, count_up{}, count); + co_await sc.join(); co_return result == 99 && count.load() == 0; } -// Pre-cancelled fork_with_drop: child not run template -auto test_fork_with_drop_cancelled(lf::env) -> lf::task { +auto 
test_fork_drop_cancelled(lf::env) -> lf::task { std::atomic count = 0; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - stop.request_stop(); - co_await sc.fork_with_drop(&stop, count_up_void{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + sc.request_stop(); + co_await sc.fork_drop(count_up_void{}, count); + co_await sc.join(); co_return count.load() == 0; } -// Pre-cancelled fork_with with return value: return address not written template -auto test_fork_with_cancelled(lf::env) -> lf::task { +auto test_fork_cancelled(lf::env) -> lf::task { std::atomic count = 0; int result = 99; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - stop.request_stop(); - co_await sc.fork_with(&stop, &result, count_up{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + sc.request_stop(); + co_await sc.fork(&result, count_up{}, count); + co_await sc.join(); co_return result == 99 && count.load() == 0; } -// Positive: call_with NOT cancelled - child runs template -auto test_call_with_not_cancelled(lf::env) -> lf::task { +auto test_call_not_cancelled(lf::env) -> lf::task { std::atomic count = 0; int result = 0; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.call_with(&stop, &result, count_up{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + co_await sc.call(&result, count_up{}, count); + co_await sc.join(); co_return result == 0 && count.load() == 1; } -// Positive: fork_with NOT cancelled - child runs template -auto test_fork_with_not_cancelled(lf::env) -> lf::task { +auto test_fork_not_cancelled(lf::env) -> lf::task { std::atomic count = 0; int result = 0; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.fork_with(&stop, &result, count_up{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + co_await sc.fork(&result, 
count_up{}, count); + co_await sc.join(); co_return result == 0 && count.load() == 1; } -// Multiple fork_with_drop: all pre-cancelled, none run template auto test_multiple_cancelled(lf::env) -> lf::task { std::atomic count = 0; - auto stop = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - stop.request_stop(); - co_await sc.fork_with_drop(&stop, count_up_void{}, count); - co_await sc.fork_with_drop(&stop, count_up_void{}, count); - co_await sc.fork_with_drop(&stop, count_up_void{}, count); - co_await lf::join(); + auto sc = co_await lf::child_scope(); + sc.request_stop(); + co_await sc.fork_drop(count_up_void{}, count); + co_await sc.fork_drop(count_up_void{}, count); + co_await sc.fork_drop(count_up_void{}, count); + co_await sc.join(); co_return count.load() == 0; } -// Mixed: some children have a cancelled token, others don't. -// Only the non-cancelled children should run. template auto test_mixed_cancel(lf::env) -> lf::task { std::atomic count = 0; - auto stop_run = co_await lf::child_stop_source(); - auto stop_skip = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - stop_skip.request_stop(); - co_await sc.fork_with_drop(&stop_run, count_up_void{}, count); // runs - co_await sc.fork_with_drop(&stop_skip, count_up_void{}, count); // skipped - co_await sc.fork_with_drop(&stop_run, count_up_void{}, count); // runs - co_await lf::join(); + auto sc_run = co_await lf::child_scope(); + auto sc_skip = co_await lf::child_scope(); + sc_skip.request_stop(); + co_await sc_run.fork_drop(count_up_void{}, count); // runs + co_await sc_skip.fork_drop(count_up_void{}, count); // skipped + co_await sc_run.fork_drop(count_up_void{}, count); // runs + co_await sc_run.join(); + co_await sc_skip.join(); co_return count.load() == 2; } // ============================================================ -// B. Cancel=false: parent frame cancellation propagation +// B. Cancel=false: parent frame cancellation propagation. 
// -// Exercises awaitable::await_suspend's Cancel=false branch. -// The check is on the PARENT frame's is_cancelled(). -// -// Strategy: use call_with to give an inner task a specific stop source as -// its frame.cancel. The inner task receives that pointer as an argument, -// calls request_stop() on it (making its own is_cancelled() true), then -// tries to launch sub-tasks via the no-cancel (Cancel=false) API. -// -// The sub-tasks are skipped because parent.is_cancelled() is true. -// At the subsequent join, handle_cancel (path E/D) fires, cleans up the -// inner task, and resumes the outer task normally. -// -// Outer task's stop chain does NOT include the inner task's cs, so the -// outer task completes normally and returns the count comparison. +// An inner task receives a stop_source& that IS its own frame's cancel +// source (bound via child_scope_ops::call_drop / Cancel=true). It calls +// request_stop() on it, making its own is_cancelled() return true, then +// tries to launch sub-tasks via scope_ops (Cancel=false). Those are +// skipped because parent.is_cancelled() is true (path B). +// At join, handle_cancel fires (paths D+E). 
// ============================================================ -// Inner task: cancels its own stop source, then tries call_drop (Cancel=false) struct inner_call_after_self_cancel { - template - static auto - operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { - my_cancel->request_stop(); // Make this frame's is_cancelled() return true + template + static auto operator()(lf::env, lf::stop_source &my_cancel, std::atomic &count) + -> lf::task { + my_cancel.request_stop(); // make this frame's is_cancelled() == true auto sc = co_await lf::scope(); - // Cancel=false: parent (this frame) is_cancelled() → child not spawned (path B) - co_await sc.call_drop(count_up_void{}, count); - // Cancel=false: same check for fork (path B) - co_await sc.fork_drop(count_up_void{}, count); - // Paths D+E: join sees is_cancelled(), fires handle_cancel, outer task resumes - co_await lf::join(); - count.fetch_add(100); // must not be reached + co_await sc.call_drop(count_up_void{}, count); // Cancel=false: parent cancelled → skip + co_await sc.fork_drop(count_up_void{}, count); // Cancel=false: parent cancelled → skip + co_await lf::join(); // paths D+E: join fires handle_cancel + count.fetch_add(100); // must not be reached } }; -// test_call_parent_cancel: outer wraps inner via call_with so inner.frame.cancel = &cs. -// inner cancels cs and verifies both call_drop and fork_drop are skipped. template auto test_call_parent_cancel(lf::env) -> lf::task { std::atomic count = 0; - auto cs = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.call_with_drop(&cs, inner_call_after_self_cancel{}, &cs, count); - co_await lf::join(); + auto outer_sc = co_await lf::child_scope(); + // Pass the scope's stop_source by reference so the inner task can cancel it. 
+ co_await outer_sc.call_drop(inner_call_after_self_cancel{}, outer_sc, count); + co_await outer_sc.join(); co_return count.load() == 0; } // ============================================================ -// C/D/E. Concurrent cancellation: final_suspend + join interaction -// -// A forked child cancels the parent's stop source, then the parent task -// arrives at join. The join detects cancellation (paths D+E) and calls -// handle_cancel. With multiple threads (busy pool), the cancel may also -// be observed in final_suspend_full (path C). +// C/D/E. Concurrent cancellation: final_suspend + join interaction. // ============================================================ -// A child task that cancels a stop source then runs normally. -// Template on CS to avoid naming lf::stop_source directly. -struct cancel_cs { - template - static auto operator()(lf::env, CS *cs, std::atomic &count) -> lf::task { +// A child task that cancels a stop_source then completes normally. +struct cancel_source { + template + static auto + operator()(lf::env, lf::stop_source &src, std::atomic &count) -> lf::task { count.fetch_add(1); - cs->request_stop(); + src.request_stop(); co_return; } }; -// Outer task: forked children cancel the frame's stop source, then join -// detects cancel (D+E). The outer is NOT the cancelled task - it just -// verifies the inner completes cleanly. 
struct inner_fork_then_cancel_at_join { - template - static auto - operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + template + static auto operator()(lf::env, lf::stop_source &my_cancel, std::atomic &count) + -> lf::task { auto sc = co_await lf::scope(); - co_await sc.fork_drop(cancel_cs{}, my_cancel, count); - co_await lf::join(); // is_cancelled after child cancels cs → handle_cancel + co_await sc.fork_drop(cancel_source{}, my_cancel, count); + co_await lf::join(); // is_cancelled after child cancels → handle_cancel count.fetch_add(100); // must not be reached } }; @@ -248,23 +210,18 @@ struct inner_fork_then_cancel_at_join { template auto test_fork_cancel_at_join(lf::env) -> lf::task { std::atomic count = 0; - auto cs = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.call_with_drop(&cs, inner_fork_then_cancel_at_join{}, &cs, count); - co_await lf::join(); - co_return count.load() == 1; // cancel_cs ran exactly once + auto outer_sc = co_await lf::child_scope(); + co_await outer_sc.call_drop(inner_fork_then_cancel_at_join{}, outer_sc, count); + co_await outer_sc.join(); + co_return count.load() == 1; } // ============================================================ -// F. Exception + cancellation interaction -// -// When a frame is cancelled at join time and the frame has an exception -// stashed, handle_cancel drops (not propagates) the exception. +// F. Exception + cancellation interaction. // ============================================================ #if LF_COMPILER_EXCEPTIONS -// A task that throws unconditionally struct just_throw { template static auto operator()(lf::env) -> lf::task { @@ -273,77 +230,54 @@ struct just_throw { } }; -// Inner task: forks just_throw, then rethrows at join. -// Used to verify exceptions propagate when no cancellation. 
struct inner_forks_throwing { template static auto operator()(lf::env) -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(just_throw{}); - co_await lf::join(); // not cancelled → exception_bit=1 → rethrow (path await_resume) - co_return; // not reached + co_await lf::join(); // not cancelled → rethrow + co_return; } }; -// Test F1: exception propagates through join and all the way to recv.get() -// when the task is NOT cancelled. template auto test_exception_propagates(lf::env) -> lf::task { - auto cs = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.call_with_drop(&cs, inner_forks_throwing{}); - co_await lf::join(); + auto outer_sc = co_await lf::child_scope(); + co_await outer_sc.call_drop(inner_forks_throwing{}); + co_await outer_sc.join(); } -// A child task that cancels a stop source AND throws. -// Exception stashes in its parent (inner_cancel_and_throw's frame). -struct cancel_cs_and_throw { - template - static auto operator()(lf::env, CS *cs, std::atomic &count) -> lf::task { - count.fetch_add(1); // Confirm this task ran - cs->request_stop(); // Cancel parent's stop source +struct cancel_source_and_throw { + template + static auto + operator()(lf::env, lf::stop_source &src, std::atomic &count) -> lf::task { + count.fetch_add(1); + src.request_stop(); throw std::runtime_error("should be dropped"); co_return; } }; -// Inner task: -// 1. fork cancel_cs_and_throw → child cancels my_cancel AND stashes exception -// in this frame (not outer's). -// 2. At join: is_cancelled AND exception_bit → handle_cancel drops exception (path F), -// then final_suspend_leading resumes outer. -// Outer sees no exception and count==1. 
struct inner_cancel_and_throw { - template - static auto - operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + template + static auto operator()(lf::env, lf::stop_source &my_cancel, std::atomic &count) + -> lf::task { auto sc = co_await lf::scope(); - co_await sc.fork_drop(cancel_cs_and_throw{}, my_cancel, count); + co_await sc.fork_drop(cancel_source_and_throw{}, my_cancel, count); co_await lf::join(); // cancelled + exception → handle_cancel drops exception count.fetch_add(100); // must not be reached } }; -// Test F2: exception stashed in a cancelled frame is silently dropped. -// recv.get() does NOT throw; cancel_cs_and_throw ran (count==1). template auto test_exception_dropped_when_cancelled(lf::env) -> lf::task { std::atomic count = 0; - auto cs = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.call_with_drop(&cs, inner_cancel_and_throw{}, &cs, count); - co_await lf::join(); // outer is NOT cancelled, no exception reaches here + auto outer_sc = co_await lf::child_scope(); + co_await outer_sc.call_drop(inner_cancel_and_throw{}, outer_sc, count); + co_await outer_sc.join(); co_return count.load() == 1; } -// Test F3: combined - verify that a non-cancellation exception still propagates -// when a sibling child cancelled the frame BUT the throwing child ran first -// (i.e., the exception stash/drop is frame-local, not task-global). 
-// -// inner_throws_first_then_cancel: -// fork throw_child → exception stashed in this frame -// fork cancel_child → cancels my_cancel -// join → cancelled + exception → exception dropped struct just_throw_and_count { template static auto operator()(lf::env, std::atomic &count) -> lf::task { @@ -354,13 +288,13 @@ struct just_throw_and_count { }; struct inner_sibling_throws_and_cancel { - template - static auto - operator()(lf::env, CS *my_cancel, std::atomic &count) -> lf::task { + template + static auto operator()(lf::env, lf::stop_source &my_cancel, std::atomic &count) + -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(just_throw_and_count{}, count); - co_await sc.fork_drop(cancel_cs{}, my_cancel, count); - co_await lf::join(); // cancelled; any exceptions dropped + co_await sc.fork_drop(cancel_source{}, my_cancel, count); + co_await lf::join(); // cancelled; exceptions dropped count.fetch_add(100); // must not be reached } }; @@ -368,11 +302,9 @@ struct inner_sibling_throws_and_cancel { template auto test_sibling_exception_dropped_when_cancelled(lf::env) -> lf::task { std::atomic count = 0; - auto cs = co_await lf::child_stop_source(); - auto sc = co_await lf::scope(); - co_await sc.call_with_drop(&cs, inner_sibling_throws_and_cancel{}, &cs, count); - co_await lf::join(); // outer is NOT cancelled, no exception - // just_throw_and_count and cancel_cs both ran → count >= 2 + auto outer_sc = co_await lf::child_scope(); + co_await outer_sc.call_drop(inner_sibling_throws_and_cancel{}, outer_sc, count); + co_await outer_sc.join(); co_return count.load() >= 2 && count.load() < 100; } @@ -387,58 +319,54 @@ void tests(Sch &scheduler) { using Ctx = lf::context_t; - // A. 
Cancel=true (child-specific token) - - SECTION("call_with_drop: pre-cancelled child is not run") { - auto recv = schedule(scheduler, test_call_with_drop_cancelled); + SECTION("call_drop: pre-cancelled child is not run") { + auto recv = schedule(scheduler, test_call_drop_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - SECTION("call_with: pre-cancelled child is not run, return address not written") { - auto recv = schedule(scheduler, test_call_with_cancelled); + SECTION("call: pre-cancelled child is not run, return address not written") { + auto recv = schedule(scheduler, test_call_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - SECTION("fork_with_drop: pre-cancelled child is not run") { - auto recv = schedule(scheduler, test_fork_with_drop_cancelled); + SECTION("fork_drop: pre-cancelled child is not run") { + auto recv = schedule(scheduler, test_fork_drop_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - SECTION("fork_with: pre-cancelled child is not run, return address not written") { - auto recv = schedule(scheduler, test_fork_with_cancelled); + SECTION("fork: pre-cancelled child is not run, return address not written") { + auto recv = schedule(scheduler, test_fork_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - SECTION("call_with: positive - not cancelled, child runs and writes result") { - auto recv = schedule(scheduler, test_call_with_not_cancelled); + SECTION("call: positive - not cancelled, child runs and writes result") { + auto recv = schedule(scheduler, test_call_not_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - SECTION("fork_with: positive - not cancelled, child runs and writes result") { - auto recv = schedule(scheduler, test_fork_with_not_cancelled); + SECTION("fork: positive - not cancelled, child runs and writes result") { + auto recv = schedule(scheduler, test_fork_not_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } 
- SECTION("multiple fork_with_drop: all pre-cancelled, none run") { + SECTION("multiple fork_drops: all pre-cancelled, none run") { auto recv = schedule(scheduler, test_multiple_cancelled); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - SECTION("fork_with_drop: mixed tokens - only non-cancelled children run") { + SECTION("mixed scopes: only non-cancelled children run") { auto recv = schedule(scheduler, test_mixed_cancel); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } - // B/D/E. Cancel=false (parent frame propagation) + join cancel handling - SECTION("call_drop/fork_drop (Cancel=false): skipped when parent frame is cancelled; " "join fires handle_cancel") { auto recv = schedule(scheduler, test_call_parent_cancel); @@ -454,8 +382,6 @@ void tests(Sch &scheduler) { #if LF_COMPILER_EXCEPTIONS - // F. Exception + cancellation - SECTION("exception propagates through join when frame is NOT cancelled") { auto recv = schedule(scheduler, test_exception_propagates); REQUIRE(recv.valid()); From 31bfee8db15a48ad8557b03167d9d9a1237dafe1 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 13:00:44 +0100 Subject: [PATCH 101/123] receiver changes --- src/core/receiver.cxx | 105 +++++++++++++++++++++++++++++++++++------- src/core/root.cxx | 16 +++---- src/core/schedule.cxx | 54 ++++++++++++++++------ 3 files changed, 138 insertions(+), 37 deletions(-) diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index c30dd563..53e4675f 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -19,23 +19,91 @@ export struct broken_receiver_error final : libfork_exception { } }; -template -struct receiver_state { - +/** + * @brief Shared state between a scheduled task and its receiver handle. + * + * @tparam T The return type of the scheduled coroutine. + * @tparam Cancellable If true, the state owns a stop_source that can be used + * to cancel the root task externally.
+ * + * Constructors forward arguments for in-place construction of the return value. + * Internal access is gated behind a hidden friend: `get(key_t, receiver_state&)`. + */ +export template +class receiver_state { + public: struct empty {}; + /// Default construction — return value is default-initialised (or empty for void). + constexpr receiver_state() = default; + + /// In-place construction of the return value from arbitrary args. + template + requires (!std::is_void_v) && std::constructible_from + constexpr explicit receiver_state(Args &&...args) + : m_return_value(std::forward(args)...) {} + + private: + template + friend class receiver; + + /** + * @brief Internal accessor returned by `get(key_t, receiver_state&)`. + * + * Not reachable by name from outside this translation unit because view + * is a private nested type. Callers use `auto` with the hidden friend. + */ + struct view { + receiver_state *p; + + constexpr void set_exception(std::exception_ptr e) noexcept { + p->m_exception = std::move(e); + } + + constexpr void notify_ready() noexcept { + p->m_ready.test_and_set(); + p->m_ready.notify_one(); + } + + [[nodiscard]] + constexpr auto return_value_address() noexcept -> T * + requires (!std::is_void_v) + { + return std::addressof(p->m_return_value); + } + + [[nodiscard]] + constexpr auto get_stop_token() noexcept -> stop_source::stop_token + requires Cancellable + { + return p->m_stop.token(); + } + }; + + /** + * @brief Hidden friend accessor for internal library use. + * + * Only callable via ADL when a `key_t` is available (i.e. by calling `key()`). + * Returns a `view` proxy to manipulate the state's private members. 
+ */ + [[nodiscard]] + friend constexpr auto get(key_t, receiver_state &self) noexcept -> view { + return {&self}; + } + [[no_unique_address]] std::conditional_t, empty, T> m_return_value{}; std::exception_ptr m_exception; std::atomic_flag m_ready; - stop_source m_stop; + [[no_unique_address]] + std::conditional_t m_stop; }; -export template +export template class receiver { - using state_type = receiver_state; + using state_type = receiver_state; public: constexpr receiver(key_t, std::shared_ptr &&state) : m_state(std::move(state)) {} @@ -51,30 +119,35 @@ class receiver { return m_state != nullptr; } - /** - * @brief Get a reference to the underlying stop_source. - */ [[nodiscard]] - constexpr auto stop_source() noexcept -> stop_source & { + constexpr auto ready() const -> bool { if (!valid()) { LF_THROW(broken_receiver_error{}); } - return m_state->m_stop; + return m_state->m_ready.test(); } - [[nodiscard]] - constexpr auto ready() const -> bool { + constexpr void wait() const { if (!valid()) { LF_THROW(broken_receiver_error{}); } - return m_state->m_ready.test(); + m_state->m_ready.wait(false); } - constexpr void wait() const { + /** + * @brief Returns a stop_token for this task's stop source. + * + * Only available when Cancellable=true. The token can be used to request + * cancellation of the scheduled task before or after it has started. + */ + [[nodiscard]] + constexpr auto token() noexcept -> stop_source::stop_token + requires Cancellable + { if (!valid()) { LF_THROW(broken_receiver_error{}); } - m_state->m_ready.wait(false); + return get(key(), *m_state).get_stop_token(); } [[nodiscard]] diff --git a/src/core/root.cxx b/src/core/root.cxx index 878e9de5..2ebdd465 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -17,7 +17,7 @@ import :task; namespace lf { -// TODO: allocator aware! +// TODO: allocator aware! 
-> IDEA embed in frame/state struct get_frame_t {}; @@ -68,11 +68,12 @@ struct root_task { promise_type *promise; }; -template +template requires async_invocable_to [[nodiscard]] auto // -root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_task> { +root_pkg(std::shared_ptr> recv, Fn fn, Args... args) + -> root_task> { // This should be resumed on a valid context. LF_ASSUME(thread_local_context != nullptr); @@ -99,7 +100,7 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t // Potentially throwing child = get(key(), ctx_invoke_t{}(std::move(fn), std::move(args)...)); } LF_CATCH_ALL { - recv->m_exception = std::current_exception(); + get(key(), *recv).set_exception(std::current_exception()); goto cleanup; } @@ -112,7 +113,7 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t LF_ASSUME(child->frame.kind == category::call); if constexpr (!std::is_void_v>) { - child->return_address = std::addressof(recv->m_return_value); + child->return_address = get(key(), *recv).return_value_address(); } // Begin normal execution of the child task, it will clean itself @@ -130,15 +131,14 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_t if constexpr (LF_COMPILER_EXCEPTIONS) { if (root->exception_bit) { // The child threw an exception, propagate it to the receiver. - recv->m_exception = extract_exception(root); + get(key(), *recv).set_exception(extract_exception(root)); } } cleanup: // Now do that which we would otherwise do at a final suspend. // Notify the receiver that the task is done. 
- recv->m_ready.test_and_set(); - recv->m_ready.notify_one(); + get(key(), *recv).notify_ready(); LF_ASSUME(root->steals == 0); LF_ASSUME(root->joins == k_u16_max); diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index 0aab065a..d0e05a77 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -11,6 +11,7 @@ import libfork.utils; import :concepts_invocable; import :concepts_scheduler; import :frame; +import :stop; import :thread_locals; import :promise; import :root; @@ -42,22 +43,28 @@ concept schedulable = schedulable_decayed, Context, std::decay_ template using invoke_decay_result_t = async_result_t, Context, std::decay_t...>; -template -using schedule_state_t = receiver_state>; +template +using schedule_state_t = receiver_state, Cancellable>; export template requires schedulable using schedule_result_t = receiver>; /** - * @brief Schedule a function to be run on a scheduler. + * @brief Schedule a function with a pre-allocated receiver state. + * + * This is the primary overload: the caller provides a shared_ptr to the + * receiver state, allowing custom allocation. The stop_token bound to the + * root frame depends on whether the state is cancellable. * * This function is strongly exception safe. */ -export template - requires schedulable, Args...> +export template + requires schedulable, Args...> && + std::same_as, Args...>> constexpr auto -schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_t, Args...> { +schedule(Sch &&sch, std::shared_ptr> state, Fn &&fn, Args &&...args) + -> receiver { using context_type = context_t; @@ -65,21 +72,22 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_t>(); - - // Package has shared ownership of the state, fine if this throws + // Package takes shared ownership of the state; fine if this throws. 
root_task task = root_pkg(state, std::forward(fn), std::forward(args)...); LF_ASSUME(task.promise != nullptr); - // TODO: benchmark if it's worth having an unstoppable root task - task.promise->frame.kind = category::root; task.promise->frame.parent = nullptr; - task.promise->frame.cancel = &state->m_stop; + + if constexpr (Cancellable) { + task.promise->frame.cancel = get(key(), *state).get_stop_token(); + } else { + task.promise->frame.cancel = stop_source::stop_token{}; // non-cancellable root + } LF_TRY { + // TODO: forward sch + modify concept sch.post(sched_handle{key(), &task.promise->frame}); // If ^ didn't throw then the root_task will destroy itself at the final suspend. } LF_CATCH_ALL { @@ -90,4 +98,24 @@ schedule(Sch &&sch, Fn &&fn, Args &&...args) -> schedule_result_t + requires schedulable, Args...> +constexpr auto +schedule(Sch &&sch, Fn &&fn, Args &&...args) -> receiver, Args...>> { + + using context_type = context_t; + using R = invoke_decay_result_t; + + auto state = std::make_shared>(); + + return schedule( + std::forward(sch), std::move(state), std::forward(fn), std::forward(args)...); +} + } // namespace lf From 80f4e0d1136ba5d30a21edfb18aee48851fbf69e Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 13:23:29 +0100 Subject: [PATCH 102/123] todo --- src/core/ops.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index e876d297..d9619258 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -47,6 +47,7 @@ constexpr auto fwd_fn(auto &&fn) noexcept -> Fn { struct join_type {}; +// TODO: remove export [[nodiscard("You should immediately co_await this!")]] constexpr auto join() noexcept -> join_type { return {}; From 516184735cac162a995b36c5d4746b1ddfa91d20 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:40:36 +0100 Subject: [PATCH 103/123] use scoped join --- benchmark/src/libfork_benchmark/fib/libfork.cpp | 2 +- benchmark/src/libfork_benchmark/uts/libfork.cpp | 6 ++++-- 
src/core/ops.cxx | 10 +--------- test/src/cancel.cpp | 10 +++++----- 4 files changed, 11 insertions(+), 17 deletions(-) diff --git a/benchmark/src/libfork_benchmark/fib/libfork.cpp b/benchmark/src/libfork_benchmark/fib/libfork.cpp index 69b6960a..e2a5d794 100644 --- a/benchmark/src/libfork_benchmark/fib/libfork.cpp +++ b/benchmark/src/libfork_benchmark/fib/libfork.cpp @@ -27,7 +27,7 @@ struct fib { co_await sc.fork(&rhs, fib{}, n - 2); co_await sc.call(&lhs, fib{}, n - 1); - co_await lf::join(); + co_await sc.join(); co_return lhs + rhs; } diff --git a/benchmark/src/libfork_benchmark/uts/libfork.cpp b/benchmark/src/libfork_benchmark/uts/libfork.cpp index 59906937..79b54894 100644 --- a/benchmark/src/libfork_benchmark/uts/libfork.cpp +++ b/benchmark/src/libfork_benchmark/uts/libfork.cpp @@ -24,9 +24,12 @@ struct uts_fn { int child_type = uts_childType(parent); parent->numChildren = num_children; + if (num_children > 0) { std::vector cs(static_cast(num_children)); + + auto sc = co_await lf::scope(); for (std::size_t i = 0; i < static_cast(num_children); ++i) { cs[i].child.type = child_type; @@ -37,7 +40,6 @@ struct uts_fn { rng_spawn(parent->state.state, cs[i].child.state.state, static_cast(i)); } - auto sc = co_await lf::scope(); if (i + 1 == static_cast(num_children)) { co_await sc.call(&cs[i].res, uts_fn{}, depth + 1, &cs[i].child); @@ -46,7 +48,7 @@ struct uts_fn { } } - co_await lf::join(); + co_await sc.join(); for (auto &&elem : cs) { r.maxdepth = std::max(r.maxdepth, elem.res.maxdepth); diff --git a/src/core/ops.cxx b/src/core/ops.cxx index d9619258..4db627ca 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -47,14 +47,6 @@ constexpr auto fwd_fn(auto &&fn) noexcept -> Fn { struct join_type {}; -// TODO: remove -export [[nodiscard("You should immediately co_await this!")]] -constexpr auto join() noexcept -> join_type { - return {}; -} - -// =============== Scope base =============== // - /** * @brief Base class shared by scope_ops and child_scope_ops. 
* @@ -79,7 +71,7 @@ struct scope_ops : scope_base { using fork_pkg = pkg; public: - // default constructible + // Default constructible scope_ops() noexcept = default; // Immovable diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 2a68879c..242e3182 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -166,7 +166,7 @@ struct inner_call_after_self_cancel { auto sc = co_await lf::scope(); co_await sc.call_drop(count_up_void{}, count); // Cancel=false: parent cancelled → skip co_await sc.fork_drop(count_up_void{}, count); // Cancel=false: parent cancelled → skip - co_await lf::join(); // paths D+E: join fires handle_cancel + co_await sc.join(); // paths D+E: join fires handle_cancel count.fetch_add(100); // must not be reached } }; @@ -202,7 +202,7 @@ struct inner_fork_then_cancel_at_join { -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(cancel_source{}, my_cancel, count); - co_await lf::join(); // is_cancelled after child cancels → handle_cancel + co_await sc.join(); // is_cancelled after child cancels → handle_cancel count.fetch_add(100); // must not be reached } }; @@ -235,7 +235,7 @@ struct inner_forks_throwing { static auto operator()(lf::env) -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(just_throw{}); - co_await lf::join(); // not cancelled → rethrow + co_await sc.join(); // not cancelled → rethrow co_return; } }; @@ -264,7 +264,7 @@ struct inner_cancel_and_throw { -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(cancel_source_and_throw{}, my_cancel, count); - co_await lf::join(); // cancelled + exception → handle_cancel drops exception + co_await sc.join(); // cancelled + exception → handle_cancel drops exception count.fetch_add(100); // must not be reached } }; @@ -294,7 +294,7 @@ struct inner_sibling_throws_and_cancel { auto sc = co_await lf::scope(); co_await sc.fork_drop(just_throw_and_count{}, count); co_await sc.fork_drop(cancel_source{}, my_cancel, count); - co_await 
lf::join(); // cancelled; exceptions dropped + co_await sc.join(); // cancelled; exceptions dropped count.fetch_add(100); // must not be reached } }; From 6764b4e8c3d5c9a6f38dc44089b5bf9f70cb758e Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:40:51 +0100 Subject: [PATCH 104/123] format --- benchmark/src/libfork_benchmark/uts/libfork.cpp | 4 +--- src/core/receiver.cxx | 7 ++----- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/benchmark/src/libfork_benchmark/uts/libfork.cpp b/benchmark/src/libfork_benchmark/uts/libfork.cpp index 79b54894..00a18e89 100644 --- a/benchmark/src/libfork_benchmark/uts/libfork.cpp +++ b/benchmark/src/libfork_benchmark/uts/libfork.cpp @@ -24,11 +24,10 @@ struct uts_fn { int child_type = uts_childType(parent); parent->numChildren = num_children; - if (num_children > 0) { std::vector cs(static_cast(num_children)); - + auto sc = co_await lf::scope(); for (std::size_t i = 0; i < static_cast(num_children); ++i) { @@ -40,7 +39,6 @@ struct uts_fn { rng_spawn(parent->state.state, cs[i].child.state.state, static_cast(i)); } - if (i + 1 == static_cast(num_children)) { co_await sc.call(&cs[i].res, uts_fn{}, depth + 1, &cs[i].child); } else { diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index 53e4675f..16c0ebea 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -40,8 +40,7 @@ class receiver_state { /// In-place construction of the return value from arbitrary args. template requires (!std::is_void_v) && std::constructible_from - constexpr explicit receiver_state(Args &&...args) - : m_return_value(std::forward(args)...) {} + constexpr explicit receiver_state(Args &&...args) : m_return_value(std::forward(args)...) 
{} private: template @@ -56,9 +55,7 @@ class receiver_state { struct view { receiver_state *p; - constexpr void set_exception(std::exception_ptr e) noexcept { - p->m_exception = std::move(e); - } + constexpr void set_exception(std::exception_ptr e) noexcept { p->m_exception = std::move(e); } constexpr void notify_ready() noexcept { p->m_ready.test_and_set(); From 07f6f976606653f425233ce822ffc4ee25251b1f Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:49:26 +0100 Subject: [PATCH 105/123] rename 1 --- src/core/cancel.md | 14 +++++++------- src/core/ops.cxx | 4 ++-- src/core/receiver.cxx | 16 ++++++++-------- src/core/root.cxx | 4 ++-- src/core/schedule.cxx | 14 +++++++------- 5 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/core/cancel.md b/src/core/cancel.md index ce00335f..cfaef8da 100644 --- a/src/core/cancel.md +++ b/src/core/cancel.md @@ -42,21 +42,21 @@ regular scope, `fork_with(token, ...)` / `call_with(token, ...)` accept a ## Task 2 - Schedule API ✓ -`receiver_state` has: +`receiver_state` has: - Public default constructor + forwarding constructors for in-place T construction. - All other members private, with accessor methods used by `root_pkg` and `schedule`. -- A `stop_source` member only when `Cancellable=true`. -- `get_stop_token()` (requires `Cancellable=true`) returns a `stop_token`. +- A `stop_source` member only when `Stoppable=true`. +- `get_stop_token()` (requires `Stoppable=true`) returns a `stop_token`. -`receiver` exposes: -- `token()` (requires `Cancellable=true`) for external cancellation. +`receiver` exposes: +- `token()` (requires `Stoppable=true`) for external cancellation. `schedule` has two overloads: ```cpp // Primary: caller supplies a pre-allocated (possibly custom-allocated) state. -auto schedule(Sch&&, shared_ptr>, Fn&&, Args&&...) - -> receiver; +auto schedule(Sch&&, shared_ptr>, Fn&&, Args&&...) + -> receiver; // Convenience: allocates via make_shared, non-cancellable by default. 
auto schedule(Sch&&, Fn&&, Args&&...) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 4db627ca..1fa8b315 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -18,9 +18,9 @@ struct no_ret_t {}; // clang-format off -template +template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] std::conditional_t maybe_cancel; + [[no_unique_address]] std::conditional_t maybe_cancel; [[no_unique_address]] std::conditional_t, no_ret_t, R *> maybe_ret_adr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index 16c0ebea..edc0df61 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -23,13 +23,13 @@ export struct broken_receiver_error final : libfork_exception { * @brief Shared state between a scheduled task and its receiver handle. * * @tparam T The return type of the scheduled coroutine. - * @tparam Cancellable If true, the state owns a stop_source that can be used + * @tparam Stoppable If true, the state owns a stop_source that can be used * to cancel the root task externally. * * Constructors forward arguments for in-place construction of the return value. * Internal access is gated behind a hidden friend: `get(key_t, receiver_state&)`. 
*/ -export template +export template class receiver_state { public: struct empty {}; @@ -71,7 +71,7 @@ class receiver_state { [[nodiscard]] constexpr auto get_stop_token() noexcept -> stop_source::stop_token - requires Cancellable + requires Stoppable { return p->m_stop.token(); } @@ -94,13 +94,13 @@ class receiver_state { std::atomic_flag m_ready; [[no_unique_address]] - std::conditional_t m_stop; + std::conditional_t m_stop; }; -export template +export template class receiver { - using state_type = receiver_state; + using state_type = receiver_state; public: constexpr receiver(key_t, std::shared_ptr &&state) : m_state(std::move(state)) {} @@ -134,12 +134,12 @@ class receiver { /** * @brief Returns a stop_token for this task's stop source. * - * Only available when Cancellable=true. The token can be used to request + * Only available when Stoppable=true. The token can be used to request * cancellation of the scheduled task before or after it has started. */ [[nodiscard]] constexpr auto token() noexcept -> stop_source::stop_token - requires Cancellable + requires Stoppable { if (!valid()) { LF_THROW(broken_receiver_error{}); diff --git a/src/core/root.cxx b/src/core/root.cxx index 2ebdd465..cb0822ec 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -68,11 +68,11 @@ struct root_task { promise_type *promise; }; -template +template requires async_invocable_to [[nodiscard]] auto // -root_pkg(std::shared_ptr> recv, Fn fn, Args... args) +root_pkg(std::shared_ptr> recv, Fn fn, Args... args) -> root_task> { // This should be resumed on a valid context. 
diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index d0e05a77..c6e3cc24 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -43,8 +43,8 @@ concept schedulable = schedulable_decayed, Context, std::decay_ template using invoke_decay_result_t = async_result_t, Context, std::decay_t...>; -template -using schedule_state_t = receiver_state, Cancellable>; +template +using schedule_state_t = receiver_state, Stoppable>; export template requires schedulable @@ -59,12 +59,12 @@ using schedule_result_t = receiver>; * * This function is strongly exception safe. */ -export template +export template requires schedulable, Args...> && std::same_as, Args...>> constexpr auto -schedule(Sch &&sch, std::shared_ptr> state, Fn &&fn, Args &&...args) - -> receiver { +schedule(Sch &&sch, std::shared_ptr> state, Fn &&fn, Args &&...args) + -> receiver { using context_type = context_t; @@ -80,7 +80,7 @@ schedule(Sch &&sch, std::shared_ptr> state, Fn && task.promise->frame.kind = category::root; task.promise->frame.parent = nullptr; - if constexpr (Cancellable) { + if constexpr (Stoppable) { task.promise->frame.cancel = get(key(), *state).get_stop_token(); } else { task.promise->frame.cancel = stop_source::stop_token{}; // non-cancellable root @@ -101,7 +101,7 @@ schedule(Sch &&sch, std::shared_ptr> state, Fn && /** * @brief Convenience overload: allocates receiver state via make_shared. * - * Defaults to non-cancellable (Cancellable=false). Delegates to the primary + * Defaults to non-cancellable (Stoppable=false). Delegates to the primary * overload above. 
*/ export template From 56bbc33f7fc9fd38b4b2af8c86ab3a4c447118ab Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:50:37 +0100 Subject: [PATCH 106/123] rename 2 --- src/core/ops.cxx | 28 ++++++++++++++-------------- src/core/promise.cxx | 6 +++--- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 1fa8b315..8b4c9278 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -20,8 +20,8 @@ struct no_ret_t {}; template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] std::conditional_t maybe_cancel; - [[no_unique_address]] std::conditional_t, no_ret_t, R *> maybe_ret_adr; + [[no_unique_address]] std::conditional_t stop_token; + [[no_unique_address]] std::conditional_t, no_ret_t, R *> return_addr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; }; @@ -84,30 +84,30 @@ struct scope_ops : scope_base { template Fn> static constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.return_addr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto fork_drop(Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } // === Call === // template Fn> static constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.return_addr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto call_drop(Fn &&fn, 
Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> static constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } }; @@ -155,30 +155,30 @@ struct child_scope_ops : scope_base, stop_source { template Fn> constexpr auto fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_cancel = token(), .maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.stop_token = token(), .return_addr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> constexpr auto fork_drop(Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.stop_token = token(), .return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> constexpr auto fork(Fn &&fn, Args &&...args) noexcept -> fork_pkg { - return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.stop_token = token(), .return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } // === Call (binds this scope's stop source as child cancel) === // template Fn> constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_cancel = token(), .maybe_ret_adr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.stop_token = token(), .return_addr = ret, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } template Fn> constexpr auto call_drop(Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.stop_token = token(), .return_addr = {}, .fn = LF_FWD(fn), .args 
= {LF_FWD(args)...}}; } template Fn> constexpr auto call(Fn &&fn, Args &&...args) noexcept -> call_pkg { - return {.maybe_cancel = token(), .maybe_ret_adr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; + return {.stop_token = token(), .return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } }; diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 6338d335..e063e38d 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -525,7 +525,7 @@ struct mixin_frame { // TODO: tests for null return path if constexpr (!std::is_void_v) { - child_promise->return_address = not_null(pkg.maybe_ret_adr); + child_promise->return_address = not_null(pkg.return_addr); } else if constexpr (!std::is_void_v) { // Set child's return address to null to inhibit the return // TODO: add test for this @@ -534,8 +534,8 @@ struct mixin_frame { if constexpr (Cancel) { // TODO: need some kind of API to launch an unstoppable task? - LF_ASSUME(pkg.maybe_cancel.stop_possible()); - child_promise->frame.cancel = pkg.maybe_cancel; + LF_ASSUME(pkg.stop_token.stop_possible()); + child_promise->frame.cancel = pkg.stop_token; } else { child_promise->frame.cancel = self.frame.cancel; } From 7cce296bdb94ed8a3a763486026348cb4ec67bbc Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:53:20 +0100 Subject: [PATCH 107/123] rename 3 --- src/core/ops.cxx | 9 +++++---- src/core/promise.cxx | 2 +- test/src/cancel.cpp | 7 +++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 8b4c9278..31954946 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -151,7 +151,7 @@ struct child_scope_ops : scope_base, stop_source { auto operator=(const child_scope_ops &) -> child_scope_ops & = delete; auto operator=(child_scope_ops &&) -> child_scope_ops & = delete; - // === Fork (binds this scope's stop source as child cancel) === // + // === Fork (binds this scope's stop source as child stop source) === // template Fn> constexpr auto 
fork(R *ret, Fn &&fn, Args &&...args) noexcept -> fork_pkg { @@ -166,7 +166,7 @@ struct child_scope_ops : scope_base, stop_source { return {.stop_token = token(), .return_addr = {}, .fn = LF_FWD(fn), .args = {LF_FWD(args)...}}; } - // === Call (binds this scope's stop source as child cancel) === // + // === Call (binds this scope's stop source as child stop source) === // template Fn> constexpr auto call(R *ret, Fn &&fn, Args &&...args) noexcept -> call_pkg { @@ -186,10 +186,11 @@ struct child_scope_ops : scope_base, stop_source { template struct child_scope_awaitable : std::suspend_never { - stop_source::stop_token parent_cancel; + + stop_source::stop_token parent_stop_source; constexpr auto await_resume(this child_scope_awaitable self) -> child_scope_ops { - return child_scope_ops{self.parent_cancel}; + return child_scope_ops{self.parent_stop_source}; } }; diff --git a/src/core/promise.cxx b/src/core/promise.cxx index e063e38d..02d3815f 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -562,7 +562,7 @@ struct mixin_frame { constexpr auto await_transform(this auto const &self, child_scope_type) noexcept -> child_scope_awaitable { - return {.parent_cancel = self.frame.cancel}; + return {.parent_stop_source = self.frame.cancel}; } constexpr static auto initial_suspend() noexcept -> std::suspend_always { return {}; } diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 242e3182..c1e4a475 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -172,7 +172,7 @@ struct inner_call_after_self_cancel { }; template -auto test_call_parent_cancel(lf::env) -> lf::task { +auto test_call_parent_stop_source(lf::env) -> lf::task { std::atomic count = 0; auto outer_sc = co_await lf::child_scope(); // Pass the scope's stop_source by reference so the inner task can cancel it. 
@@ -367,9 +367,8 @@ void tests(Sch &scheduler) { REQUIRE(std::move(recv).get()); } - SECTION("call_drop/fork_drop (Cancel=false): skipped when parent frame is cancelled; " - "join fires handle_cancel") { - auto recv = schedule(scheduler, test_call_parent_cancel); + SECTION("call_drop/fork_drop: skipped when parent frame is cancelled; join fires handle_cancel") { + auto recv = schedule(scheduler, test_call_parent_stop_source); REQUIRE(recv.valid()); REQUIRE(std::move(recv).get()); } From 282f9a26bb2d3c62018493753991a388b9bcd628 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:56:21 +0100 Subject: [PATCH 108/123] rename 4 --- src/core/frame.cxx | 4 ++-- src/core/promise.cxx | 6 +++--- src/core/root.cxx | 2 +- src/core/schedule.cxx | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/frame.cxx b/src/core/frame.cxx index 75d323ae..053e56d6 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -33,7 +33,7 @@ struct frame_type : frame_base { uninitialized except; frame_type *parent; - stop_source::stop_token cancel; + stop_source::stop_token stop_token; [[no_unique_address]] Checkpoint stack_ckpt; @@ -53,7 +53,7 @@ struct frame_type : frame_base { [[nodiscard]] constexpr auto is_cancelled() const noexcept -> bool { // TODO: Should exception trigger cancellation? - return cancel.stop_requested(); + return stop_token.stop_requested(); } [[nodiscard]] diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 02d3815f..79e03761 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -535,9 +535,9 @@ struct mixin_frame { if constexpr (Cancel) { // TODO: need some kind of API to launch an unstoppable task? 
LF_ASSUME(pkg.stop_token.stop_possible()); - child_promise->frame.cancel = pkg.stop_token; + child_promise->frame.stop_token = pkg.stop_token; } else { - child_promise->frame.cancel = self.frame.cancel; + child_promise->frame.stop_token = self.frame.stop_token; } return {.child = &child_promise->frame}; @@ -562,7 +562,7 @@ struct mixin_frame { constexpr auto await_transform(this auto const &self, child_scope_type) noexcept -> child_scope_awaitable { - return {.parent_stop_source = self.frame.cancel}; + return {.parent_stop_source = self.frame.stop_token}; } constexpr static auto initial_suspend() noexcept -> std::suspend_always { return {}; } diff --git a/src/core/root.cxx b/src/core/root.cxx index cb0822ec..e56b16d6 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -108,7 +108,7 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args // Propagate parent/cancel info to child child->frame.parent = root; - child->frame.cancel = root->cancel; + child->frame.stop_token = root->stop_token; LF_ASSUME(child->frame.kind == category::call); diff --git a/src/core/schedule.cxx b/src/core/schedule.cxx index c6e3cc24..5980ea43 100644 --- a/src/core/schedule.cxx +++ b/src/core/schedule.cxx @@ -81,9 +81,9 @@ schedule(Sch &&sch, std::shared_ptr> state, Fn &&fn task.promise->frame.parent = nullptr; if constexpr (Stoppable) { - task.promise->frame.cancel = get(key(), *state).get_stop_token(); + task.promise->frame.stop_token = get(key(), *state).get_stop_token(); } else { - task.promise->frame.cancel = stop_source::stop_token{}; // non-cancellable root + task.promise->frame.stop_token = stop_source::stop_token{}; // non-cancellable root } LF_TRY { From a68264db06ce3cf45d4d6a55183b5fb2d44e8cad Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 15:57:49 +0100 Subject: [PATCH 109/123] rename 5 --- src/core/frame.cxx | 2 +- src/core/promise.cxx | 12 ++++++------ src/core/root.cxx | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/frame.cxx 
b/src/core/frame.cxx index 053e56d6..e6a1ec11 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -51,7 +51,7 @@ struct frame_type : frame_base { constexpr frame_type(Checkpoint &&ckpt) noexcept : stack_ckpt(std::move(ckpt)) { joins = k_u16_max; } [[nodiscard]] - constexpr auto is_cancelled() const noexcept -> bool { + constexpr auto stop_requested() const noexcept -> bool { // TODO: Should exception trigger cancellation? return stop_token.stop_requested(); } diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 79e03761..1a29ce6d 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -126,7 +126,7 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe // Must reset parent's control block before resuming parent. parent->reset_counters(); - if (parent->is_cancelled()) [[unlikely]] { + if (parent->stop_requested()) [[unlikely]] { // Don't resume if cancelled if constexpr (LF_COMPILER_EXCEPTIONS) { if (parent->exception_bit) [[unlikely]] { @@ -174,7 +174,7 @@ constexpr auto final_suspend_trailing(Context &context, frame_t *parent parent->reset_counters(); - if (parent->is_cancelled()) [[unlikely]] { + if (parent->stop_requested()) [[unlikely]] { if constexpr (LF_COMPILER_EXCEPTIONS) { if (parent->exception_bit) [[unlikely]] { std::ignore = extract_exception(parent); @@ -280,11 +280,11 @@ struct awaitable : std::suspend_always { // Noop if canceled, must clean-up the child that will never be resumed. 
if constexpr (Cancel) { - if (self.child->is_cancelled()) [[unlikely]] { + if (self.child->stop_requested()) [[unlikely]] { return self.child->handle().destroy(), parent; } } else { - if (parent.promise().frame.is_cancelled()) [[unlikely]] { + if (parent.promise().frame.stop_requested()) [[unlikely]] { return self.child->handle().destroy(), parent; } } @@ -346,7 +346,7 @@ struct join_awaitable { constexpr auto await_ready(this join_awaitable self) noexcept -> bool { if (not_null(self.frame)->steals == 0) [[likely]] { - if (self.frame->is_cancelled()) [[unlikely]] { + if (self.frame->stop_requested()) [[unlikely]] { // Must unconditionally suspended if canceled return false; } @@ -398,7 +398,7 @@ struct join_awaitable { // Need to acquire to ensure we see all writes by other threads to the result. std::atomic_thread_fence(std::memory_order_acquire); - if (self.frame->is_cancelled()) [[unlikely]] { + if (self.frame->stop_requested()) [[unlikely]] { return self.handle_cancel(); } diff --git a/src/core/root.cxx b/src/core/root.cxx index e56b16d6..0be5ce6c 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -90,7 +90,7 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args... args promise_type *child = nullptr; - if (root->is_cancelled()) { + if (root->stop_requested()) { // The root task was cancelled before it even started, we can skip // straight to cleanup. 
goto cleanup; From f4cc3bc131abab46c9695d3befe4093378c8f489 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:01:00 +0100 Subject: [PATCH 110/123] rm dead file --- src/core/cancel.md | 64 ---------------------------------------------- 1 file changed, 64 deletions(-) delete mode 100644 src/core/cancel.md diff --git a/src/core/cancel.md b/src/core/cancel.md deleted file mode 100644 index cfaef8da..00000000 --- a/src/core/cancel.md +++ /dev/null @@ -1,64 +0,0 @@ -# Cancel notes - -Goals: - -- Symmetry between schedule and fork/call cancellation binding ✓ -- Allocator aware schedule (using shared pointer std:: function) ✓ -- Customize the construction of receiver ✓ -- Default schedule should be non-cancellable (bind nullptr) ✓ -- Join should be a member function of the scope ✓ -- Cancel scope instead of separate source+scope ✓ - -## Task 1 - cancel scope ✓ - -```cpp -auto example() -> task { - - auto sc = co_await child_scope(); - - co_await sc.fork(fn1, 0); - co_await sc.call(fn2, sc.token()); - - co_await sc.join(); -} -``` - -`child_scope()` returns a `child_scope_ops` that: -- Owns a `stop_source` chained onto the parent frame's cancel token. -- All `fork`/`call` operations automatically bind the scope's stop source - as the child's cancel source (Cancel=true path). -- `.token()` returns a `stop_token` wrapping the scope's stop source. -- `.join()` is available via the shared `scope_base` base class - (same as calling `co_await lf::join()`). - -`stop_source` is now internal-only (not exported). The public API is -`stop_token` — a lightweight pointer-sized wrapper that exposes -`stop_requested()`, `request_stop()`, and `race_request_stop()`. - -`scope_ops` (obtained via `co_await lf::scope()`) also inherits `scope_base` -and therefore also exposes `.join()`. For explicit cancel binding from a -regular scope, `fork_with(token, ...)` / `call_with(token, ...)` accept a -`stop_token`. 
- -## Task 2 - Schedule API ✓ - -`receiver_state` has: -- Public default constructor + forwarding constructors for in-place T construction. -- All other members private, with accessor methods used by `root_pkg` and `schedule`. -- A `stop_source` member only when `Stoppable=true`. -- `get_stop_token()` (requires `Stoppable=true`) returns a `stop_token`. - -`receiver` exposes: -- `token()` (requires `Stoppable=true`) for external cancellation. - -`schedule` has two overloads: - -```cpp -// Primary: caller supplies a pre-allocated (possibly custom-allocated) state. -auto schedule(Sch&&, shared_ptr>, Fn&&, Args&&...) - -> receiver; - -// Convenience: allocates via make_shared, non-cancellable by default. -auto schedule(Sch&&, Fn&&, Args&&...) - -> receiver; // receiver -``` From 9a2b0ac70f377013a21843ea6f7309c6d723ba6a Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:03:18 +0100 Subject: [PATCH 111/123] rename 6 --- src/core/ops.cxx | 4 ++-- src/core/promise.cxx | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 31954946..3b67e5e8 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -18,9 +18,9 @@ struct no_ret_t {}; // clang-format off -template +template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] std::conditional_t stop_token; + [[no_unique_address]] std::conditional_t stop_token; [[no_unique_address]] std::conditional_t, no_ret_t, R *> return_addr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 1a29ce6d..6d827edc 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -247,7 +247,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat == category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -279,7 +279,7 @@ struct 
awaitable : std::suspend_always { } // Noop if canceled, must clean-up the child that will never be resumed. - if constexpr (Cancel) { + if constexpr (StopToken) { if (self.child->stop_requested()) [[unlikely]] { return self.child->handle().destroy(), parent; } @@ -499,10 +499,10 @@ struct mixin_frame { // --- Await transformations - template + template constexpr auto - await_transform_pkg(this auto const &self, pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + await_transform_pkg(this auto const &self, pkg &&pkg) noexcept( + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct static_assert(std::is_reference_v && (... && std::is_reference_v)); @@ -532,7 +532,7 @@ struct mixin_frame { child_promise->return_address = nullptr; } - if constexpr (Cancel) { + if constexpr (StopToken) { // TODO: need some kind of API to launch an unstoppable task? LF_ASSUME(pkg.stop_token.stop_possible()); child_promise->frame.stop_token = pkg.stop_token; @@ -543,9 +543,9 @@ struct mixin_frame { return {.child = &child_promise->frame}; } - template - constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + template + constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL { From e077b354e493ea4bc14d6cd905a4c26dd9efe899 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:06:40 +0100 Subject: [PATCH 112/123] rename 7 --- .codespellrc | 2 +- src/core/frame.cxx | 2 +- src/core/promise.cxx | 4 ++-- src/core/stop.cxx | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.codespellrc b/.codespellrc index c6cf5dfe..86730201 100644 --- a/.codespellrc +++ b/.codespellrc @@ -1,5 +1,5 @@ [codespell] -builtin = clear,rare,en-GB_to_en-US,names,informal,code +builtin = clear,rare,names,informal,code check-filenames = check-hidden = ignore-words-list = deque,warmup,stdio,copyable,combinate diff 
--git a/src/core/frame.cxx b/src/core/frame.cxx index e6a1ec11..71ec8934 100644 --- a/src/core/frame.cxx +++ b/src/core/frame.cxx @@ -52,7 +52,7 @@ struct frame_type : frame_base { [[nodiscard]] constexpr auto stop_requested() const noexcept -> bool { - // TODO: Should exception trigger cancellation? + // TODO: Should exception trigger stop? return stop_token.stop_requested(); } diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 6d827edc..d1145782 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -399,7 +399,7 @@ struct join_awaitable { std::atomic_thread_fence(std::memory_order_acquire); if (self.frame->stop_requested()) [[unlikely]] { - return self.handle_cancel(); + return self.handle_stop(); } // We must reset the control block and take the stack. We should never @@ -443,7 +443,7 @@ struct join_awaitable { } [[nodiscard]] - constexpr auto handle_cancel(this join_awaitable self) -> coro<> { + constexpr auto handle_stop(this join_awaitable self) -> coro<> { // Only need to take the stack if there were steals if (self.frame->steals > 0) { self.take_stack(); diff --git a/src/core/stop.cxx b/src/core/stop.cxx index ab0d96f4..bb82b4a3 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -19,12 +19,12 @@ export class stop_source { class stop_token { public: /** - * @brief Construct a null (non-cancellable) token. + * @brief Construct a null (unstoppable) token. */ constexpr stop_token() noexcept = default; /** - * @brief Returns true if a stop source is associated (cancellation is possible). + * @brief Returns true if a stop source is associated (stopping is possible). 
*/ [[nodiscard]] constexpr auto stop_possible() const noexcept -> bool { From 1581ec1933ee010174eb5f0c26b2348fb595c386 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:10:03 +0100 Subject: [PATCH 113/123] rename 8 --- src/core/promise.cxx | 14 ++++++-------- src/core/root.cxx | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index d1145782..3fb7c23d 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -127,7 +127,7 @@ constexpr auto final_suspend_full(Context &context, frame_t *frame) noe parent->reset_counters(); if (parent->stop_requested()) [[unlikely]] { - // Don't resume if cancelled + // Don't resume if stopped if constexpr (LF_COMPILER_EXCEPTIONS) { if (parent->exception_bit) [[unlikely]] { std::ignore = extract_exception(parent); @@ -269,16 +269,14 @@ struct awaitable : std::suspend_always { constexpr auto await_suspend(this awaitable self, coro> parent) noexcept -> coro<> { - // TODO: Add tests for exception/cancellation handling in fork/call. - - // TODO: test of having a dedicated is_cancelld awaitable is quicker + // TODO: test if having a dedicated is_stopped awaitable is quicker if (!self.child) [[unlikely]] { // Noop if an exception was thrown. return parent; } - // Noop if canceled, must clean-up the child that will never be resumed. + // Noop if stopped, must clean-up the child that will never be resumed.
if constexpr (StopToken) { if (self.child->stop_requested()) [[unlikely]] { return self.child->handle().destroy(), parent; } @@ -347,7 +345,7 @@ struct join_awaitable { constexpr auto await_ready(this join_awaitable self) noexcept -> bool { if (not_null(self.frame)->steals == 0) [[likely]] { if (self.frame->stop_requested()) [[unlikely]] { - // Must unconditionally suspended if canceled + // Must unconditionally suspend if stopped return false; } // If no steals then we are the only owner of the parent and we are @@ -383,7 +381,7 @@ struct join_awaitable { std::uint32_t offset = k_u16_max - steals; std::uint32_t joined = self.frame->atomic_joins().fetch_sub(offset, std::memory_order_release); - // If this was a cancel: + // If this was a stop: // // steals = 0, joins = k_u16_max then: // @@ -452,7 +450,7 @@ struct join_awaitable { // We always need to reset the connters as we modified self.frame->reset_counters(); - // Drop any exceptions in the now-cancelled task + // Drop any exceptions in the now-stopped task if constexpr (LF_COMPILER_EXCEPTIONS) { if (self.frame->exception_bit) [[unlikely]] { std::ignore = extract_exception(self.frame); diff --git a/src/core/root.cxx b/src/core/root.cxx index 0be5ce6c..00990260 100644 --- a/src/core/root.cxx +++ b/src/core/root.cxx @@ -106,7 +106,7 @@ root_pkg(std::shared_ptr> recv, Fn fn, Args...
args LF_ASSUME(child != nullptr); - // Propagate parent/cancel info to child + // Propagate parent/stop info to child child->frame.parent = root; child->frame.stop_token = root->stop_token; From c957159ce7372a55c9d90038b607df59d8b36951 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:16:39 +0100 Subject: [PATCH 114/123] restore comment --- src/core/ops.cxx | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 3b67e5e8..2e111094 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -29,7 +29,10 @@ struct [[nodiscard("You should immediately co_await this!")]] pkg { // clang-format on /** - * @brief Forward the function member of a pkg correctly. + * @brief Forward the function member of a pkg correctly + * + * The Fn member should be an l/r value reference, r-value reference need an + * explicit move to be forwarded correctly. */ template constexpr auto fwd_fn(auto &&fn) noexcept -> Fn { From a957970533742e65a65fcf3ce530ec80e95adadd Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:53:25 +0100 Subject: [PATCH 115/123] nicer name --- src/core/ops.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 2e111094..3d47475b 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -13,14 +13,14 @@ import :stop; namespace lf { // Placeholder types for absent optional fields. 
-struct no_cnl_t {}; +struct no_stop_t {}; struct no_ret_t {}; // clang-format off template struct [[nodiscard("You should immediately co_await this!")]] pkg { - [[no_unique_address]] std::conditional_t stop_token; + [[no_unique_address]] std::conditional_t stop_token; [[no_unique_address]] std::conditional_t, no_ret_t, R *> return_addr; [[no_unique_address]] Fn fn; [[no_unique_address]] tuple args; From bdb3d4eaa4dc59eef1d2f6615753ad2d27aec260 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:55:23 +0100 Subject: [PATCH 116/123] complexity notes --- src/core/stop.cxx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/core/stop.cxx b/src/core/stop.cxx index bb82b4a3..4b3b8c52 100644 --- a/src/core/stop.cxx +++ b/src/core/stop.cxx @@ -35,6 +35,10 @@ export class stop_source { * @brief Returns true if any stop source in the ancestor chain has been stopped. * * A null token always returns false. + * + * Complexity: O(chain depth). Every task that creates a child_scope adds one + * node to the chain, so deeply-nested task hierarchies pay proportionally more + * per stop check. */ [[nodiscard]] constexpr auto stop_requested() const noexcept -> bool { @@ -72,6 +76,10 @@ export class stop_source { /** * @brief Returns true if any stop source in the ancestor chain has been stopped. + * + * Complexity: O(chain depth). Every task that creates a child_scope adds one + * node to the chain, so deeply-nested task hierarchies pay proportionally more + * per stop check.
*/ [[nodiscard]] constexpr auto stop_requested() const noexcept -> bool { From 06ba8105397e69104b4cc05b50ed8e5c0691a937 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 16:57:17 +0100 Subject: [PATCH 117/123] more tests --- src/core/receiver.cxx | 31 +++++++++++++++++-- test/src/cancel.cpp | 72 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 100 insertions(+), 3 deletions(-) diff --git a/src/core/receiver.cxx b/src/core/receiver.cxx index edc0df61..73289b7a 100644 --- a/src/core/receiver.cxx +++ b/src/core/receiver.cxx @@ -42,6 +42,18 @@ class receiver_state { requires (!std::is_void_v) && std::constructible_from constexpr explicit receiver_state(Args &&...args) : m_return_value(std::forward(args)...) {} + /** + * @brief Request that the associated task stop. + * + * Only available when Stoppable=true. Safe to call before scheduling — + * the root frame checks stop_requested() before executing the task body. + */ + constexpr auto request_stop() noexcept -> void + requires Stoppable + { + m_stop.request_stop(); + } + private: template friend class receiver; @@ -134,8 +146,8 @@ class receiver { /** * @brief Returns a stop_token for this task's stop source. * - * Only available when Stoppable=true. The token can be used to request - * cancellation of the scheduled task before or after it has started. + * Only available when Stoppable=true. The token can be used to observe + * whether the associated task has been cancelled. */ [[nodiscard]] constexpr auto token() noexcept -> stop_source::stop_token @@ -147,6 +159,21 @@ class receiver { return get(key(), *m_state).get_stop_token(); } + /** + * @brief Request that the associated task stop. + * + * Only available when Stoppable=true. Thread-safe; may be called + * concurrently with the task executing on worker threads. 
+ */ + constexpr auto request_stop() -> void + requires Stoppable + { + if (!valid()) { + LF_THROW(broken_receiver_error{}); + } + m_state->m_stop.request_stop(); + } + [[nodiscard]] constexpr auto get() && -> T { diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index c1e4a475..448b8953 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -29,6 +29,14 @@ import libfork; // // F. handle_cancel (exception_bit set on cancelled frame): // exception dropped, not propagated to caller +// +// G. Nested child_scope chain propagation: +// inner child_scope inherits parent's stop token; stopping the outer +// source propagates through the chain to the inner scope. +// +// H. Stoppable receiver / pre-cancelled root: +// receiver_state::request_stop() before schedule() triggers +// the goto-cleanup fast path in root.cxx — task body never executes. namespace { @@ -305,11 +313,57 @@ auto test_sibling_exception_dropped_when_cancelled(lf::env) -> lf::task auto outer_sc = co_await lf::child_scope(); co_await outer_sc.call_drop(inner_sibling_throws_and_cancel{}, outer_sc, count); co_await outer_sc.join(); - co_return count.load() >= 2 && count.load() < 100; + auto c = count.load(); + co_return c >= 2 && c < 100; } #endif // LF_COMPILER_EXCEPTIONS +// ============================================================ +// G. Nested child_scope chain propagation. +// +// A child_scope created inside a task that runs under another child_scope +// has m_parent pointing to the outer scope's stop_source. Stopping the +// outer source propagates through the chain, making the inner scope's +// stop_requested() return true (path A). +// ============================================================ + +struct inner_with_nested_scope { + template + static auto + operator()(lf::env, lf::stop_source &outer, std::atomic &count) -> lf::task { + auto inner_sc = co_await lf::child_scope(); + // Cancel the outer scope; inner_sc.m_parent == &outer, so the chain fires. 
+ outer.request_stop(); + co_await inner_sc.fork_drop(count_up_void{}, count); // skipped: inner_sc is stopped + co_await inner_sc.join(); // handle_stop + count.fetch_add(100); // must not be reached + } +}; + +template +auto test_nested_child_scope_chain(lf::env) -> lf::task { + std::atomic count = 0; + auto outer_sc = co_await lf::child_scope(); + co_await outer_sc.call_drop(inner_with_nested_scope{}, outer_sc, count); + co_await outer_sc.join(); + co_return count.load() == 0; +} + +// ============================================================ +// H. Stoppable receiver / pre-cancelled root. +// +// receiver_state::request_stop() before schedule() makes the root +// frame's stop_token immediately satisfied, triggering the goto-cleanup +// fast path in root.cxx so the task body never runs. +// ============================================================ + +template +auto pre_cancelled_root_fn(lf::env, bool *ran) -> lf::task { + *ran = true; + co_return; +} + // ============================================================ // Run all tests against a given scheduler // ============================================================ @@ -379,6 +433,22 @@ void tests(Sch &scheduler) { REQUIRE(std::move(recv).get()); } + SECTION("nested child_scope: stopping outer scope propagates to inner via chain") { + auto recv = schedule(scheduler, test_nested_child_scope_chain); + REQUIRE(recv.valid()); + REQUIRE(std::move(recv).get()); + } + + SECTION("stoppable receiver: pre-cancelled root task body never executes") { + bool ran = false; + auto state = std::make_shared>(); + state->request_stop(); + auto recv = lf::schedule(scheduler, std::move(state), pre_cancelled_root_fn, &ran); + REQUIRE(recv.valid()); + std::move(recv).get(); + REQUIRE(!ran); + } + #if LF_COMPILER_EXCEPTIONS SECTION("exception propagates through join when frame is NOT cancelled") { From aa4af3f1ba64ad51bd9309037b27a4b08b77c890 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 17:00:32 +0100 Subject: 
[PATCH 118/123] dynamic section --- test/src/cancel.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index 448b8953..f0df4de0 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -494,7 +494,9 @@ TEMPLATE_TEST_CASE("Busy cancel", "[cancel]", mono_busy_thread_pool, poly_busy_t STATIC_REQUIRE(lf::scheduler); for (std::size_t thr = 1; thr < 4; ++thr) { - TestType scheduler{thr}; - tests(scheduler); + DYNAMIC_SECTION("threads=" << thr) { + TestType scheduler{thr}; + tests(scheduler); + } } } From 37dc76ecdabaa447a8df0c2edfe64f84d51e2b18 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 17:01:36 +0100 Subject: [PATCH 119/123] rename --- src/core/ops.cxx | 4 ++-- src/core/promise.cxx | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/ops.cxx b/src/core/ops.cxx index 3d47475b..3ea05bcc 100644 --- a/src/core/ops.cxx +++ b/src/core/ops.cxx @@ -190,10 +190,10 @@ struct child_scope_ops : scope_base, stop_source { template struct child_scope_awaitable : std::suspend_never { - stop_source::stop_token parent_stop_source; + stop_source::stop_token parent_stop_token; constexpr auto await_resume(this child_scope_awaitable self) -> child_scope_ops { - return child_scope_ops{self.parent_stop_source}; + return child_scope_ops{self.parent_stop_token}; } }; diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 3fb7c23d..b04a42c3 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -560,7 +560,7 @@ struct mixin_frame { constexpr auto await_transform(this auto const &self, child_scope_type) noexcept -> child_scope_awaitable { - return {.parent_stop_source = self.frame.stop_token}; + return {.parent_stop_token = self.frame.stop_token}; } constexpr static auto initial_suspend() noexcept -> std::suspend_always { return {}; } From 1759e34b69f74f2402365277ae41154faef72ac2 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 17:01:42 +0100 Subject: [PATCH 
120/123] guard max --- benchmark/src/libfork_benchmark/common.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/src/libfork_benchmark/common.hpp b/benchmark/src/libfork_benchmark/common.hpp index 0649b8cc..1c6dcef6 100644 --- a/benchmark/src/libfork_benchmark/common.hpp +++ b/benchmark/src/libfork_benchmark/common.hpp @@ -13,7 +13,7 @@ struct incorrect_result : public std::runtime_error { }; inline void bench_thread_args(benchmark::Benchmark *bench, auto make_args) { - unsigned hw = std::thread::hardware_concurrency(); + unsigned hw = std::max(1U, std::thread::hardware_concurrency()); for (unsigned t : {1U, 2U, 4U, 6U, 8U, 12U, 16U, 24U, 32U, 48U, 64U, 96U}) { if (t > hw) { return; From 6dd0f41c0b8fb9f4b6b225e7a499bb40a42f2fad Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 19:21:22 +0100 Subject: [PATCH 121/123] update comments --- test/src/cancel.cpp | 48 ++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/test/src/cancel.cpp b/test/src/cancel.cpp index f0df4de0..c648d8a7 100644 --- a/test/src/cancel.cpp +++ b/test/src/cancel.cpp @@ -7,27 +7,27 @@ import std; import libfork; -// Exhaustive tests for all cancellation paths in promise.cxx. +// Exhaustive tests for all stop-token paths in promise.cxx. // -// Cancellation check-points in promise.cxx: +// Stop check-points in promise.cxx: // -// A. awaitable::await_suspend (Cancel=true): -// child->is_cancelled() → child not spawned (fork/call via child_scope_ops) // -// B. awaitable::await_suspend (Cancel=false): -// parent.promise().frame.is_cancelled() → child not spawned (fork/call via scope_ops) +// A. awaitable::await_suspend (StopToken=true): +// child->stop_requested() → child not spawned (fork/call via child_scope_ops) // +// B. awaitable::await_suspend (StopToken=false): +// parent.promise().frame.stop_requested() → child not spawned (fork/call via scope_ops) // // C.
final_suspend_full / final_suspend_trailing: -// parent->is_cancelled() after winning join race → exception dropped, +// parent->stop_requested() after winning join race → exception dropped, // iterative ancestor cleanup (exercises concurrent/stolen path) // // D. join_awaitable::await_ready: -// is_cancelled() forces suspension even when steals==0 +// stop_requested() forces suspension even when steals==0 // // E. join_awaitable::await_suspend: -// is_cancelled() after winning join race → handle_cancel() +// stop_requested() after winning join race → handle_stop() // -// F. handle_cancel (exception_bit set on cancelled frame): +// F. handle_stop (exception_bit set on stopped frame): // exception dropped, not propagated to caller // // G. Nested child_scope chain propagation: @@ -156,25 +156,25 @@ auto test_mixed_cancel(lf::env) -> lf::task { } // ============================================================ -// B. Cancel=false: parent frame cancellation propagation. +// B. StopToken=false: parent frame stop propagation. // -// An inner task receives a stop_source& that IS its own frame's cancel -// source (bound via child_scope_ops::call_drop / Cancel=true). It calls -// request_stop() on it, making its own is_cancelled() return true, then -// tries to launch sub-tasks via scope_ops (Cancel=false). Those are -// skipped because parent.is_cancelled() is true (path B). -// At join, handle_cancel fires (paths D+E). +// An inner task receives a stop_source& that IS its own frame's stop +// source (bound via child_scope_ops::call_drop / StopToken=true). It calls +// request_stop() on it, making its own stop_requested() return true, then +// tries to launch sub-tasks via scope_ops (StopToken=false). Those are +// skipped because parent.frame.stop_requested() is true (path B). +// At join, handle_stop fires (paths D+E). 
// ============================================================ struct inner_call_after_self_cancel { template static auto operator()(lf::env, lf::stop_source &my_cancel, std::atomic &count) -> lf::task { - my_cancel.request_stop(); // make this frame's is_cancelled() == true + my_cancel.request_stop(); // make this frame's stop_requested() == true auto sc = co_await lf::scope(); - co_await sc.call_drop(count_up_void{}, count); // Cancel=false: parent cancelled → skip - co_await sc.fork_drop(count_up_void{}, count); // Cancel=false: parent cancelled → skip - co_await sc.join(); // paths D+E: join fires handle_cancel + co_await sc.call_drop(count_up_void{}, count); // StopToken=false: stop requested → skip + co_await sc.fork_drop(count_up_void{}, count); // StopToken=false: stop requested → skip + co_await sc.join(); // paths D+E: join fires handle_stop count.fetch_add(100); // must not be reached } }; @@ -210,7 +210,7 @@ struct inner_fork_then_cancel_at_join { -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(cancel_source{}, my_cancel, count); - co_await sc.join(); // is_cancelled after child cancels → handle_cancel + co_await sc.join(); // stop_requested() after child requests stop → handle_stop count.fetch_add(100); // must not be reached } }; @@ -272,7 +272,7 @@ struct inner_cancel_and_throw { -> lf::task { auto sc = co_await lf::scope(); co_await sc.fork_drop(cancel_source_and_throw{}, my_cancel, count); - co_await sc.join(); // cancelled + exception → handle_cancel drops exception + co_await sc.join(); // stop requested + exception → handle_stop drops exception count.fetch_add(100); // must not be reached } }; @@ -302,7 +302,7 @@ struct inner_sibling_throws_and_cancel { auto sc = co_await lf::scope(); co_await sc.fork_drop(just_throw_and_count{}, count); co_await sc.fork_drop(cancel_source{}, my_cancel, count); - co_await sc.join(); // cancelled; exceptions dropped + co_await sc.join(); // stop requested; exceptions dropped 
count.fetch_add(100); // must not be reached } }; From d42e9fb435ba3dc1873257ff872e17a893673cc4 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 19:24:47 +0100 Subject: [PATCH 122/123] drop branch --- src/core/promise.cxx | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index b04a42c3..9fc204ff 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -276,15 +276,9 @@ struct awaitable : std::suspend_always { return parent; } - // Noop if stopped, must clean-up the child that will never be resumed. - if constexpr (StopToken) { - if (self.child->stop_requested()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } - } else { - if (parent.promise().frame.stop_requested()) [[unlikely]] { - return self.child->handle().destroy(), parent; - } + if (self.child->stop_requested()) [[unlikely]] { + // Noop if stopped, must clean-up the child that will never be resumed. + return self.child->handle().destroy(), parent; } // Propagate parent->child relationships From 4e4250269765b88c83c1ca44c15f54d38d49f529 Mon Sep 17 00:00:00 2001 From: Conor Date: Sun, 19 Apr 2026 19:25:33 +0100 Subject: [PATCH 123/123] drop template --- src/core/promise.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/promise.cxx b/src/core/promise.cxx index 9fc204ff..24b1908a 100644 --- a/src/core/promise.cxx +++ b/src/core/promise.cxx @@ -247,7 +247,7 @@ constexpr void stash_current_exception(frame_type *frame) noexcept { } } -template +template struct awaitable : std::suspend_always { static_assert(Cat == category::call || Cat == category::fork, "Invalid category for awaitable"); @@ -494,7 +494,7 @@ struct mixin_frame { template constexpr auto await_transform_pkg(this auto const &self, pkg &&pkg) noexcept( - async_nothrow_invocable) -> awaitable { + async_nothrow_invocable) -> awaitable { // Required for noexcept specifier to be correct 
static_assert(std::is_reference_v && (... && std::is_reference_v)); @@ -537,7 +537,7 @@ struct mixin_frame { template constexpr auto await_transform(this auto &self, pkg &&pkg) noexcept - -> awaitable { + -> awaitable { LF_TRY { return self.await_transform_pkg(std::move(pkg)); } LF_CATCH_ALL {