Skip to content

Commit a3c584c

Browse files
jpnurmi and claude
committed
fix(retry): prevent poll task from re-arming after shutdown
Use a SENTRY_POLL_SHUTDOWN sentinel so that a concurrent retry_poll_task cannot resubmit the delayed poll that shutdown just dropped. The CAS(SCHEDULED→IDLE) in retry_poll_task is a no-op when `scheduled` is SHUTDOWN, and the subsequent CAS(IDLE→SCHEDULED) also fails.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 30dbb71 commit a3c584c

1 file changed

Lines changed: 16 additions & 7 deletions

File tree

src/sentry_retry.c

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,12 @@ typedef enum {
1919
SENTRY_RETRY_SEALED = 2
2020
} sentry_retry_state_t;
2121

22+
typedef enum {
23+
SENTRY_POLL_IDLE = 0,
24+
SENTRY_POLL_SCHEDULED = 1,
25+
SENTRY_POLL_SHUTDOWN = 2
26+
} sentry_poll_state_t;
27+
2228
struct sentry_retry_s {
2329
sentry_run_t *run;
2430
bool cache_keep;
@@ -233,10 +239,12 @@ retry_poll_task(void *_retry, void *_state)
233239
= sentry__atomic_fetch(&retry->state) == SENTRY_RETRY_STARTUP
234240
? retry->startup_time
235241
: 0;
236-
// clear before scanning so a concurrent enqueue sees 0 and arms a poll
237-
sentry__atomic_store(&retry->scheduled, 0);
242+
// CAS instead of unconditional store to preserve SENTRY_POLL_SHUTDOWN
243+
sentry__atomic_compare_swap(
244+
&retry->scheduled, SENTRY_POLL_SCHEDULED, SENTRY_POLL_IDLE);
238245
if (sentry__retry_send(retry, before, retry->send_cb, retry->send_data)
239-
&& sentry__atomic_compare_swap(&retry->scheduled, 0, 1)) {
246+
&& sentry__atomic_compare_swap(
247+
&retry->scheduled, SENTRY_POLL_IDLE, SENTRY_POLL_SCHEDULED)) {
240248
sentry__bgworker_submit_delayed(retry->bgworker, retry_poll_task, NULL,
241249
retry, SENTRY_RETRY_INTERVAL);
242250
}
@@ -252,7 +260,7 @@ sentry__retry_start(sentry_retry_t *retry, sentry_bgworker_t *bgworker,
252260
retry->bgworker = bgworker;
253261
retry->send_cb = send_cb;
254262
retry->send_data = send_data;
255-
sentry__atomic_store(&retry->scheduled, 1);
263+
sentry__atomic_store(&retry->scheduled, SENTRY_POLL_SCHEDULED);
256264
sentry__bgworker_submit_delayed(
257265
bgworker, retry_poll_task, NULL, retry, SENTRY_RETRY_THROTTLE);
258266
}
@@ -280,10 +288,10 @@ void
280288
sentry__retry_shutdown(sentry_retry_t *retry)
281289
{
282290
if (retry) {
283-
// drop the delayed poll that would stall bgworker_flush
291+
// drop the delayed poll and prevent retry_poll_task from re-arming
284292
sentry__bgworker_foreach_matching(
285293
retry->bgworker, retry_poll_task, drop_task_cb, NULL);
286-
sentry__atomic_store(&retry->scheduled, 0);
294+
sentry__atomic_store(&retry->scheduled, SENTRY_POLL_SHUTDOWN);
287295
sentry__bgworker_submit(retry->bgworker, retry_flush_task, NULL, retry);
288296
}
289297
}
@@ -345,7 +353,8 @@ sentry__retry_enqueue(sentry_retry_t *retry, const sentry_envelope_t *envelope)
345353

346354
sentry__atomic_compare_swap(
347355
&retry->state, SENTRY_RETRY_STARTUP, SENTRY_RETRY_RUNNING);
348-
if (sentry__atomic_compare_swap(&retry->scheduled, 0, 1)) {
356+
if (sentry__atomic_compare_swap(
357+
&retry->scheduled, SENTRY_POLL_IDLE, SENTRY_POLL_SCHEDULED)) {
349358
sentry__bgworker_submit_delayed(retry->bgworker, retry_poll_task, NULL,
350359
retry, SENTRY_RETRY_INTERVAL);
351360
}

0 commit comments

Comments (0)