@@ -138,83 +138,6 @@ struct final_awaitable : std::suspend_always {
138138 }
139139};
140140
/**
 * @brief Final-suspend continuation for a completed child task: the part that
 *        deals only with the parent frame.
 *
 * Decrements the parent's outstanding-join counter. If this thread is the
 * last child to complete (i.e. it wins the join race), the parent's control
 * block is reset and the parent's coroutine handle is returned so the caller
 * resumes it via symmetric transfer. Otherwise a noop coroutine handle is
 * returned and the caller falls back to the executor.
 *
 * @param context The worker context of the thread that just completed the
 *                child task (this thread owns the stack the child was on).
 * @param parent  Frame of the parent task awaiting its children. NOTE: this
 *                pointer must not be dereferenced after the fetch_sub below
 *                unless we win the join race — see comments in the body.
 * @return The parent's handle if we won the join race, otherwise
 *         std::noop_coroutine(). (coro<> is presumably convertible from
 *         both — declared elsewhere in the project; confirm.)
 */
template <worker_context Context>
[[nodiscard]]
constexpr auto final_suspend_continue (Context *context, frame_t <Context> *parent) noexcept -> coro<> {

  // An owner is a worker who:
  //
  // - Created the task.
  // - OR had the task submitted to them.
  // - OR won the task at a join.
  //
  // An owner of a task owns the stack the task is on.
  //
  // As the worker who completed the child task this thread owns the stack the child task was on.
  //
  // Either:
  //
  // 1. The parent is on the same stack as the child.
  // 2. OR the parent is on a different stack to the child.
  //
  // Case (1) implies: we owned the parent; forked the child task; then the parent was then stolen.
  // Case (2) implies: we stole the parent task; then forked the child; then the parent was stolen.
  //
  // Case (2) implies that our stack is empty.

  // As soon as we do the `fetch_sub` below the parent task is no longer safe
  // to access as it may be resumed and then destroyed by another thread. Hence
  // we must make copies on-the-stack of any data we may need if we lose the
  // join race.
  //
  // Equal checkpoints mean the parent frame lives on this thread's own stack,
  // i.e. case (1) above: we are the parent's owner.
  bool const owner = parent->stack_ckpt == context->stack ().checkpoint ();

  // TODO: we could reduce branching if we unconditionally release and also
  // drop pre-release function altogether... Need to benchmark with code that
  // triggers a lot of stealing.

  // As soon as we do the fetch_sub (if we lose) someone may acquire
  // the stack so we must prepare it for release now — it cannot be
  // deferred past the decrement.
  auto release_key = context->stack ().prepare_release ();

  // TODO: we could add an `if (owner)` around acquire below, then we could
  // define that acquire is always called with null or not-self.

  // Register with parent we have completed this child task. The release
  // order publishes this child's writes to whichever thread wins the join.
  if (parent->atomic_joins ().fetch_sub (1 , std::memory_order_release) == 1 ) {
    // Parent has reached join and we are the last child task to complete. We
    // are the exclusive owner of the parent and therefore, we must continue
    // parent. As we won the race, acquire all writes before resuming.
    // (Standalone acquire fence pairs with every sibling's release fetch_sub.)
    std::atomic_thread_fence (std::memory_order_acquire);

    if (!owner) {
      // In case of scenario (2) we must acquire the parent's stack.
      context->stack ().acquire (std::as_const (parent->stack_ckpt ));
    }

    // Must reset parent's control block before resuming parent.
    parent->reset_counters ();

    // Symmetric transfer: the caller will resume the parent directly.
    return parent->handle ();
  }

  // We did not win the join-race, we cannot dereference the parent pointer now
  // as the frame may now be freed by the winner. Parent has not reached join
  // or we are not the last child to complete. We are now out of jobs, we must
  // yield to the executor.

  if (owner) {
    // We were unable to resume the parent and we were its owner; as the
    // resuming thread will take ownership of the parent's stack we must
    // give it up (using the key prepared before the fetch_sub).
    context->stack ().release (std::move (release_key));
  }

  // Else, case (2), our stack has no allocations on it, it may be used later.
  return std::noop_coroutine ();
}
217-
218141// =============== Fork/Call =============== //
219142
220143// TODO: make sure exceptions are cancel-safe (I think now cancellation can leak)
0 commit comments