Skip to content

Commit b5cf8ca

Browse files
fix: use state field for ClusterShutdown instead of local variable (#458)
ClusterShutdown in ClusterManagerState was checked by update handlers to reject work after shutdown, but was never set — only a local shouldShutdown variable was set. This made the shutdown guard dead code, allowing updates to slip through during the AllHandlersFinished drain. Replace shouldShutdown with cm.state.ClusterShutdown as the single source of truth. Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 2d3ec87 commit b5cf8ca

1 file changed

Lines changed: 2 additions & 3 deletions

File tree

safe_message_handler/workflow.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -292,16 +292,15 @@ func (cm *ClusterManager) run(ctx workflow.Context) (ClusterManagerResult, error
292292
cm.logger.Info("Cluster started")
293293
for {
294294
selector := workflow.NewSelector(ctx)
295-
shouldShutdown := false
296295
selector.AddReceive(cm.shutdownCh, func(c workflow.ReceiveChannel, _ bool) {
297296
c.Receive(ctx, nil)
298-
shouldShutdown = true
297+
cm.state.ClusterShutdown = true
299298
})
300299
selector.AddFuture(workflow.NewTimer(ctx, cm.sleepInterval), func(f workflow.Future) {
301300
cm.performHealthCheck(ctx)
302301
})
303302
selector.Select(ctx)
304-
if shouldShutdown {
303+
if cm.state.ClusterShutdown {
305304
break
306305
}
307306
if cm.shouldContinueAsNew(ctx) {

0 commit comments

Comments (0)