Skip to content

Commit f96129a

Browse files
committed
Add Apple-Clang TSan CI build and harden strand cache shutdown
- Add Apple-Clang (macOS 26) to the TSan build matrix
- Skip CMake for all sanitizer variants (redundant with B2)
- Fix strand_service cached_frame_ leak: use a closed sentinel so in-flight invokers cannot repopulate the cache after shutdown
- Make cached_frame_ atomic to eliminate data races
- Replace std::optional with placement-new in stop_only_awaitable to fix a data race on the optional's engaged flag
- Zero-initialize stop_cb_buf_ to silence a GCC 15 false positive
1 parent a9e7699 commit f96129a

File tree

3 files changed

+50
-18
lines changed

3 files changed

+50
-18
lines changed

.github/generate-matrix.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@ def generate_sanitizer_variant(compiler_family, spec):
129129
"asan": True,
130130
"build-type": "RelWithDebInfo",
131131
"shared": True,
132+
"build-cmake": False,
132133
}
133134

134135
# MSVC and Clang-CL only support ASAN, not UBSAN
@@ -142,14 +143,15 @@ def generate_sanitizer_variant(compiler_family, spec):
142143

143144

144145
def generate_tsan_variant(compiler_family, spec):
145-
"""Generate TSan variant for the latest compiler in a family (Linux only)."""
146+
"""Generate TSan variant for the latest compiler in a family."""
146147
overrides = {
147148
"tsan": True,
148149
"build-type": "RelWithDebInfo",
149150
"shared": True,
151+
"build-cmake": False,
150152
}
151153

152-
if compiler_family == "clang":
154+
if compiler_family in ("clang", "apple-clang"):
153155
overrides["shared"] = False
154156

155157
return make_entry(compiler_family, spec, **overrides)
@@ -202,7 +204,7 @@ def main():
202204
matrix.append(generate_sanitizer_variant(family, spec))
203205

204206
# TSan is incompatible with ASan; emitted as a separate variant
205-
if family in ("gcc", "clang"):
207+
if family in ("gcc", "clang", "apple-clang"):
206208
matrix.append(generate_tsan_variant(family, spec))
207209

208210
if family == "gcc":

src/ex/detail/strand_service.cpp

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,17 @@ namespace detail {
2626
Each strand_impl provides serialization for coroutines
2727
dispatched through strands that share it.
2828
*/
29+
// Sentinel stored in cached_frame_ after shutdown to prevent
30+
// in-flight invokers from repopulating a freed cache slot.
31+
inline void* const kCacheClosed = reinterpret_cast<void*>(1);
32+
2933
struct strand_impl
3034
{
3135
std::mutex mutex_;
3236
strand_queue pending_;
3337
bool locked_ = false;
3438
std::atomic<std::thread::id> dispatch_thread_{};
35-
void* cached_frame_ = nullptr;
39+
std::atomic<void*> cached_frame_{nullptr};
3640
};
3741

3842
//----------------------------------------------------------
@@ -52,9 +56,10 @@ struct strand_invoker
5256
std::size_t padded = (n + A - 1) & ~(A - 1);
5357
std::size_t total = padded + sizeof(strand_impl*);
5458

55-
void* p = impl.cached_frame_
56-
? std::exchange(impl.cached_frame_, nullptr)
57-
: ::operator new(total);
59+
void* p = impl.cached_frame_.exchange(
60+
nullptr, std::memory_order_acquire);
61+
if(!p || p == kCacheClosed)
62+
p = ::operator new(total);
5863

5964
// Trailer lets delete recover impl
6065
*reinterpret_cast<strand_impl**>(
@@ -70,9 +75,9 @@ struct strand_invoker
7075
auto* impl = *reinterpret_cast<strand_impl**>(
7176
static_cast<char*>(p) + padded);
7277

73-
if (!impl->cached_frame_)
74-
impl->cached_frame_ = p;
75-
else
78+
void* expected = nullptr;
79+
if(!impl->cached_frame_.compare_exchange_strong(
80+
expected, p, std::memory_order_release))
7681
::operator delete(p);
7782
}
7883

@@ -126,11 +131,10 @@ class strand_service_impl : public strand_service
126131
std::lock_guard<std::mutex> lock(impls_[i].mutex_);
127132
impls_[i].locked_ = true;
128133

129-
if(impls_[i].cached_frame_)
130-
{
131-
::operator delete(impls_[i].cached_frame_);
132-
impls_[i].cached_frame_ = nullptr;
133-
}
134+
void* p = impls_[i].cached_frame_.exchange(
135+
kCacheClosed, std::memory_order_acquire);
136+
if(p)
137+
::operator delete(p);
134138
}
135139
}
136140

test/unit/test_helpers.hpp

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
#include <boost/capy/ex/io_env.hpp>
1919
#include <boost/capy/task.hpp>
2020

21-
#include <optional>
21+
#include <atomic>
2222
#include <stop_token>
2323

2424
#include "test_suite.hpp"
@@ -264,15 +264,41 @@ struct stop_only_awaitable
264264
stop_only_awaitable() noexcept = default;
265265
stop_only_awaitable(stop_only_awaitable && ) noexcept {}
266266

267-
std::optional<stop_resume_callback> stop_cb;
267+
// Placement-new storage instead of std::optional to avoid a
268+
// data race on optional's _M_engaged flag. The stop_callback
269+
// constructor synchronises with request_stop() through the
270+
// stop-state's atomics, but optional::emplace writes _M_engaged
271+
// *after* the constructor returns — outside that sync window.
272+
// When ~jthread calls request_stop() before join(), the
273+
// destructor's _M_reset (on the requesting thread) races with
274+
// emplace's _M_engaged write (on the registering thread).
275+
#ifdef _MSC_VER
276+
# pragma warning(push)
277+
# pragma warning(disable: 4324) // padded due to alignas
278+
#endif
279+
alignas(stop_resume_callback)
280+
unsigned char stop_cb_buf_[sizeof(stop_resume_callback)]{};
281+
#ifdef _MSC_VER
282+
# pragma warning(pop)
283+
#endif
284+
std::atomic<bool> active_{false};
285+
286+
~stop_only_awaitable()
287+
{
288+
if (active_.load(std::memory_order_acquire))
289+
reinterpret_cast<stop_resume_callback*>(
290+
stop_cb_buf_)->~stop_resume_callback();
291+
}
268292

269293
bool await_ready() {return false;}
270294

271295
std::coroutine_handle<> await_suspend(std::coroutine_handle<> h, io_env const* env)
272296
{
273297
if (env->stop_token.stop_requested())
274298
return h;
275-
stop_cb.emplace(env->stop_token, env->post_resume(h));
299+
::new(stop_cb_buf_) stop_resume_callback(
300+
env->stop_token, env->post_resume(h));
301+
active_.store(true, std::memory_order_release);
276302
return std::noop_coroutine();
277303
}
278304
void await_resume() {}

0 commit comments

Comments
 (0)