Skip to content

Commit 633e474

Browse files
committed
test: replace context.Background() with t.Context()/b.Context() in tests
Tests now use the testing.T/B/F context binding introduced in Go 1.24 instead of context.Background(). The bound context is cancelled when the test finishes, so any goroutines or in-flight operations spawned by a test get stopped automatically when the test exits or times out — this avoids goroutine leaks and surfaces lifetime bugs that context.Background() silently masks.

Mechanical replacement across 31 test files:
- ctx := context.Background() -> ctx := t.Context()
- context.WithCancel(context.Background()) -> context.WithCancel(t.Context())
- context.WithTimeout(context.Background(), …) -> context.WithTimeout(t.Context(), …)
- Inside *testing.B benchmarks -> b.Context()
- Inside helpers taking testing.TB -> tb.Context()
- ratelimit doRequest helper now takes *testing.T so it can use t.Context()
- Removed now-unused "context" imports where no other context.* usage remained

Intentionally left as context.Background():
- t.Cleanup(...) callbacks: t.Context() is cancelled BEFORE cleanups run, so passing it to Close()/Terminate() would abort the cleanup operation.
- Helpers without a *testing.T/B in scope (e.g. proxy_test.go newTestBackend, uffd/userfaultfd/rpc_services_test.go subprocess harness).
- Unreachable defensive fallbacks of the form `if parentCtx == nil { parentCtx = context.Background() }` in placement_benchmark_test.go after b.Context() (which never returns nil).

Verified with `go vet` and `go test -run=NONE` (compile-only) across all affected packages, including GOOS=linux for build-tagged orchestrator tests.
1 parent 91d00d4 commit 633e474

30 files changed

Lines changed: 173 additions & 185 deletions

File tree

packages/api/internal/handlers/proxy_grpc_test.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package handlers
22

33
import (
4-
"context"
54
"testing"
65

76
"github.com/stretchr/testify/assert"
@@ -61,7 +60,7 @@ func TestIsNonEnvdTrafficRequest(t *testing.T) {
6160
for _, tt := range tests {
6261
t.Run(tt.name, func(t *testing.T) {
6362
t.Parallel()
64-
result := isNonEnvdTrafficRequest(context.Background(), tt.md, "test-sandbox")
63+
result := isNonEnvdTrafficRequest(t.Context(), tt.md, "test-sandbox")
6564
assert.Equal(t, tt.expected, result)
6665
})
6766
}

packages/api/internal/handlers/sandbox_create_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,7 @@ func TestValidateNetworkConfig(t *testing.T) {
280280
t.Run(tt.name, func(t *testing.T) {
281281
t.Parallel()
282282
mockFF := handlersmocks.NewMockFeatureFlagsClient(t)
283-
err := validateNetworkConfig(context.Background(), mockFF, uuid.Nil, "", tt.network)
283+
err := validateNetworkConfig(t.Context(), mockFF, uuid.Nil, "", tt.network)
284284

285285
if tt.wantErr {
286286
if err == nil {
@@ -1038,7 +1038,7 @@ func TestValidateNetworkRules(t *testing.T) {
10381038
t.Parallel()
10391039

10401040
ff := tt.setupFF(t)
1041-
apiErr := validateNetworkRules(context.Background(), ff, teamID, tt.envdVersion, tt.rules)
1041+
apiErr := validateNetworkRules(t.Context(), ff, teamID, tt.envdVersion, tt.rules)
10421042

10431043
if tt.wantMsg == "" {
10441044
assert.Nil(t, apiErr)

packages/api/internal/middleware/ratelimit/ratelimit_test.go

Lines changed: 18 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package ratelimit
22

33
import (
4-
"context"
54
"encoding/json"
65
"net/http"
76
"net/http/httptest"
@@ -55,9 +54,11 @@ func routeConfig(rate, burst int) map[string]map[string]int {
5554
}
5655

5756
// doRequest performs a POST /sandboxes/test-sbx/connect.
58-
func doRequest(r *gin.Engine) *httptest.ResponseRecorder {
57+
func doRequest(t *testing.T, r *gin.Engine) *httptest.ResponseRecorder {
58+
t.Helper()
59+
5960
w := httptest.NewRecorder()
60-
req, _ := http.NewRequestWithContext(context.Background(), http.MethodPost, "/sandboxes/test-sbx/connect", nil)
61+
req, _ := http.NewRequestWithContext(t.Context(), http.MethodPost, "/sandboxes/test-sbx/connect", nil)
6162
r.ServeHTTP(w, req)
6263

6364
return w
@@ -99,7 +100,7 @@ func TestMiddleware_SkipsUnauthenticated(t *testing.T) {
99100
c.JSON(http.StatusOK, gin.H{"status": "ok"})
100101
})
101102

102-
w := doRequest(r)
103+
w := doRequest(t, r)
103104
assert.Equal(t, http.StatusOK, w.Code)
104105
}
105106

@@ -117,7 +118,7 @@ func TestMiddleware_FailOpen(t *testing.T) {
117118
limiter := redis_rate.NewLimiter(badClient)
118119
r := newRouterWithTeam(limiter, Config{FailOpen: true}, ff, uuid.New())
119120

120-
w := doRequest(r)
121+
w := doRequest(t, r)
121122
assert.Equal(t, http.StatusOK, w.Code)
122123
}
123124

@@ -134,7 +135,7 @@ func TestMiddleware_FailClosed(t *testing.T) {
134135
limiter := redis_rate.NewLimiter(badClient)
135136
r := newRouterWithTeam(limiter, Config{FailOpen: false}, ff, uuid.New())
136137

137-
w := doRequest(r)
138+
w := doRequest(t, r)
138139
assert.Equal(t, http.StatusInternalServerError, w.Code)
139140
}
140141

@@ -149,7 +150,7 @@ func TestMiddleware_UnconfiguredRouteAllowsThrough(t *testing.T) {
149150
limiter := redis_rate.NewLimiter(badClient)
150151
r := newRouterWithTeam(limiter, Config{FailOpen: true}, ff, uuid.New())
151152

152-
w := doRequest(r)
153+
w := doRequest(t, r)
153154
assert.Equal(t, http.StatusOK, w.Code)
154155
// No rate limit headers should be set for unconfigured routes.
155156
assert.Empty(t, w.Header().Get("RateLimit-Limit"))
@@ -170,7 +171,7 @@ func TestIntegration_AllowedRequestSetsHeaders(t *testing.T) {
170171

171172
r := newRouterWithTeam(limiter, Config{FailOpen: true}, ff, uuid.New())
172173

173-
w := doRequest(r)
174+
w := doRequest(t, r)
174175

175176
assert.Equal(t, http.StatusOK, w.Code)
176177
assert.Equal(t, "20", w.Header().Get("RateLimit-Limit"))
@@ -193,12 +194,12 @@ func TestIntegration_BurstThenDeny(t *testing.T) {
193194

194195
// First 3 requests should succeed (burst).
195196
for i := range 3 {
196-
w := doRequest(r)
197+
w := doRequest(t, r)
197198
assert.Equal(t, http.StatusOK, w.Code, "request %d should be allowed", i+1)
198199
}
199200

200201
// 4th should be denied.
201-
w := doRequest(r)
202+
w := doRequest(t, r)
202203
assert.Equal(t, http.StatusTooManyRequests, w.Code)
203204
assert.NotEmpty(t, w.Header().Get("Retry-After"))
204205

@@ -227,16 +228,16 @@ func TestIntegration_Refill(t *testing.T) {
227228

228229
// Exhaust burst.
229230
for range 2 {
230-
w := doRequest(r)
231+
w := doRequest(t, r)
231232
assert.Equal(t, http.StatusOK, w.Code)
232233
}
233-
w := doRequest(r)
234+
w := doRequest(t, r)
234235
assert.Equal(t, http.StatusTooManyRequests, w.Code)
235236

236237
// Wait for refill (rate=10/s → one token every 100ms).
237238
time.Sleep(200 * time.Millisecond)
238239

239-
w = doRequest(r)
240+
w = doRequest(t, r)
240241
assert.Equal(t, http.StatusOK, w.Code)
241242
}
242243

@@ -260,13 +261,13 @@ func TestIntegration_IndependentTeams(t *testing.T) {
260261
rB := newRouterWithTeam(limiter, cfg, ff, teamB)
261262

262263
// Team A uses its quota.
263-
w := doRequest(rA)
264+
w := doRequest(t, rA)
264265
assert.Equal(t, http.StatusOK, w.Code)
265-
w = doRequest(rA)
266+
w = doRequest(t, rA)
266267
assert.Equal(t, http.StatusTooManyRequests, w.Code)
267268

268269
// Team B should still have quota.
269-
w = doRequest(rB)
270+
w = doRequest(t, rB)
270271
assert.Equal(t, http.StatusOK, w.Code)
271272
}
272273

@@ -295,7 +296,7 @@ func TestIntegration_ConcurrentAccess(t *testing.T) {
295296
wg.Add(1)
296297
go func(idx int) {
297298
defer wg.Done()
298-
w := doRequest(r)
299+
w := doRequest(t, r)
299300
results[idx] = w.Code
300301
}(i)
301302
}

packages/api/internal/oauth/oauth_test.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ func TestNewVerifierConfig(t *testing.T) {
3434
t.Run(tt.name, func(t *testing.T) {
3535
t.Parallel()
3636

37-
verifier, err := NewVerifier(context.Background(), tt.issuerURL)
37+
verifier, err := NewVerifier(t.Context(), tt.issuerURL)
3838
require.NoError(t, err)
3939
require.NotNil(t, verifier)
4040
})
@@ -44,10 +44,10 @@ func TestNewVerifierConfig(t *testing.T) {
4444
func TestNoopVerifierRejectsClaims(t *testing.T) {
4545
t.Parallel()
4646

47-
verifier, err := NewVerifier(context.Background(), "")
47+
verifier, err := NewVerifier(t.Context(), "")
4848
require.NoError(t, err)
4949

50-
claims, err := verifier.VerifyClaims(context.Background(), "token")
50+
claims, err := verifier.VerifyClaims(t.Context(), "token")
5151
require.Error(t, err)
5252
require.Empty(t, claims)
5353
}
@@ -72,7 +72,7 @@ func TestNewVerifierLoadsOIDCProvider(t *testing.T) {
7272
t.Cleanup(server.Close)
7373
issuerURL = server.URL
7474

75-
verifier, err := NewVerifier(context.Background(), server.URL)
75+
verifier, err := NewVerifier(t.Context(), server.URL)
7676
require.NoError(t, err)
7777
require.NotNil(t, verifier)
7878
}
@@ -179,7 +179,7 @@ func TestRequireClaims(t *testing.T) {
179179
t.Run(tt.name, func(t *testing.T) {
180180
t.Parallel()
181181

182-
claims, err := RequireClaims(context.Background(), tt.md, tt.verifier)
182+
claims, err := RequireClaims(t.Context(), tt.md, tt.verifier)
183183
if tt.wantErr {
184184
require.Error(t, err)
185185

packages/api/internal/orchestrator/discovery/kubernetes_test.go

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package discovery
22

33
import (
4-
"context"
54
"net"
65
"strconv"
76
"testing"
@@ -59,7 +58,7 @@ func TestKubernetesDiscovery_PodsWithSharedPrefix(t *testing.T) {
5958
client := fake.NewSimpleClientset(pod1, pod2)
6059
d := NewKubernetes(client, testNamespace, testLabelSelector)
6160

62-
nodes, err := d.ListNodes(context.Background())
61+
nodes, err := d.ListNodes(t.Context())
6362
require.NoError(t, err)
6463
require.Len(t, nodes, 2)
6564

@@ -96,7 +95,7 @@ func TestKubernetesDiscovery_FiltersNotReady(t *testing.T) {
9695
client := fake.NewSimpleClientset(ready, notReady)
9796
d := NewKubernetes(client, testNamespace, testLabelSelector)
9897

99-
nodes, err := d.ListNodes(context.Background())
98+
nodes, err := d.ListNodes(t.Context())
10099
require.NoError(t, err)
101100
require.Len(t, nodes, 1)
102101
assert.Equal(t, ready.Name, nodes[0].ShortID)
@@ -122,7 +121,7 @@ func TestKubernetesDiscovery_FiltersPending(t *testing.T) {
122121
client := fake.NewSimpleClientset(pending)
123122
d := NewKubernetes(client, testNamespace, testLabelSelector)
124123

125-
nodes, err := d.ListNodes(context.Background())
124+
nodes, err := d.ListNodes(t.Context())
126125
require.NoError(t, err)
127126
assert.Empty(t, nodes)
128127
}
@@ -149,7 +148,7 @@ func TestKubernetesDiscovery_FiltersMissingIP(t *testing.T) {
149148
client := fake.NewSimpleClientset(noIP)
150149
d := NewKubernetes(client, testNamespace, testLabelSelector)
151150

152-
nodes, err := d.ListNodes(context.Background())
151+
nodes, err := d.ListNodes(t.Context())
153152
require.NoError(t, err)
154153
assert.Empty(t, nodes)
155154
}

packages/api/internal/orchestrator/nodemanager/node_test.go

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package nodemanager_test
22

33
import (
4-
"context"
54
"testing"
65

76
"github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldtestdata"
@@ -35,7 +34,7 @@ func TestNode_OptimisticAdd_FlagEnabled(t *testing.T) {
3534
CPUs: 2,
3635
MiBMemory: 1024,
3736
}
38-
node.OptimisticAdd(context.Background(), res)
37+
node.OptimisticAdd(t.Context(), res)
3938

4039
// 6. Assert: When flag is enabled, resources should be successfully accumulated
4140
newMetrics := node.Metrics()
@@ -65,7 +64,7 @@ func TestNode_OptimisticAdd_FlagDisabled(t *testing.T) {
6564
CPUs: 2,
6665
MiBMemory: 1024,
6766
}
68-
node.OptimisticAdd(context.Background(), res)
67+
node.OptimisticAdd(t.Context(), res)
6968

7069
// 6. Assert: When flag is disabled, return early, resources should not be accumulated
7170
newMetrics := node.Metrics()
@@ -95,7 +94,7 @@ func TestNode_OptimisticRemove_FlagEnabled(t *testing.T) {
9594
CPUs: 2,
9695
MiBMemory: 1024,
9796
}
98-
node.OptimisticRemove(context.Background(), res)
97+
node.OptimisticRemove(t.Context(), res)
9998

10099
// 6. Assert: When flag is enabled, resources should be successfully deducted
101100
newMetrics := node.Metrics()
@@ -125,7 +124,7 @@ func TestNode_OptimisticRemove_FlagDisabled(t *testing.T) {
125124
CPUs: 2,
126125
MiBMemory: 1024,
127126
}
128-
node.OptimisticRemove(context.Background(), res)
127+
node.OptimisticRemove(t.Context(), res)
129128

130129
// 6. Assert: When flag is disabled, return early, resources should remain unchanged
131130
newMetrics := node.Metrics()

packages/api/internal/sandbox/storage/memory/operations_benchmark_test.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package memory
22

33
import (
4-
"context"
54
"fmt"
65
"testing"
76
"time"
@@ -105,7 +104,7 @@ func BenchmarkStorageGetItemsRunningByTeam(b *testing.B) {
105104
}
106105

107106
func BenchmarkStorageExpiredItems(b *testing.B) {
108-
ctx := context.Background()
107+
ctx := b.Context()
109108
benchmarkSizes(b, func(b *testing.B, f benchFixture) {
110109
b.Helper()
111110

@@ -116,7 +115,7 @@ func BenchmarkStorageExpiredItems(b *testing.B) {
116115
}
117116

118117
func BenchmarkStorageTeamsWithSandboxCount(b *testing.B) {
119-
ctx := context.Background()
118+
ctx := b.Context()
120119
benchmarkSizes(b, func(b *testing.B, f benchFixture) {
121120
b.Helper()
122121

0 commit comments

Comments (0)