Skip to content

Commit 25271b3

Browse files
committed
fix: address Copilot review — correct totalAttempts counting and drop batch on maxRateLimitDuration exceeded
- Move `totalAttempts++` after the rate-limit sleep check so it only counts actual HTTP requests
- Detect when `maxRateLimitDuration` is exceeded and drop the batch instead of silently retrying
- Update test expectations for `X-Retry-Count` and `maxRateLimitDuration` behavior
- Clamp token-manager backoff attempt to non-negative
1 parent 7652157 commit 25271b3

3 files changed

Lines changed: 29 additions & 24 deletions

File tree

packages/node/src/lib/token-manager.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ export class TokenManager implements ITokenManager {
179179
}
180180

181181
const timeUntilRefreshInMs = backoff({
182-
attempt: this.retryCount - 1,
182+
attempt: Math.max(this.retryCount - 1, 0),
183183
minTimeout: 100,
184184
maxTimeout: 60 * 1000,
185185
})

packages/node/src/plugins/segmentio/__tests__/publisher.test.ts

Lines changed: 17 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -697,7 +697,7 @@ describe('retry semantics', () => {
697697
expect(makeReqSpy).toHaveBeenCalledTimes(2)
698698
const [first, second] = getAllRequests()
699699
expect(first.headers['X-Retry-Count']).toBeUndefined()
700-
expect(second.headers['X-Retry-Count']).toBe('2')
700+
expect(second.headers['X-Retry-Count']).toBe('1')
701701
})
702702

703703
it('T07 408 uses backoff (Retry-After header ignored)', async () => {
@@ -964,7 +964,7 @@ describe('retry semantics', () => {
964964
expect(makeReqSpy).toHaveBeenCalledTimes(2)
965965
const [first, second] = getAllRequests()
966966
expect(first.headers['X-Retry-Count']).toBeUndefined()
967-
expect(second.headers['X-Retry-Count']).toBe('2')
967+
expect(second.headers['X-Retry-Count']).toBe('1')
968968
expect(updated.failedDelivery()).toBeFalsy()
969969
})
970970

@@ -1204,35 +1204,37 @@ describe('retry semantics', () => {
12041204
expect(makeReqSpy.mock.calls.length).toBeLessThan(100)
12051205
})
12061206

1207-
it('T20 maxRateLimitDuration: clears rate-limit window and resumes send', async () => {
1207+
it('T20 maxRateLimitDuration: drops batch after duration exceeded', async () => {
12081208
jest.useFakeTimers()
12091209
const headers = new TestHeaders()
12101210
headers.set('Retry-After', '60')
12111211

1212-
makeReqSpy
1213-
.mockReturnValueOnce(
1214-
createError({
1215-
status: 429,
1216-
statusText: 'Too Many Requests',
1217-
...headers,
1218-
})
1219-
)
1220-
.mockReturnValue(createSuccess())
1212+
makeReqSpy.mockReturnValue(
1213+
createError({
1214+
status: 429,
1215+
statusText: 'Too Many Requests',
1216+
...headers,
1217+
})
1218+
)
12211219

12221220
const { plugin: segmentPlugin } = createTestNodePlugin({
12231221
maxRetries: 3,
12241222
flushAt: 1,
12251223
maxRateLimitDuration: 1, // 1 second
12261224
})
12271225

1228-
// First event gets 429, then resumes after maxRateLimitDuration elapses.
1226+
// First event gets 429, rate-limit state is set
12291227
const ctx1 = trackEvent()
12301228
const pending = segmentPlugin.track(ctx1)
12311229
expect(makeReqSpy).toHaveBeenCalledTimes(1)
12321230

1231+
// Advance past maxRateLimitDuration — batch should be dropped
12331232
await jest.advanceTimersByTimeAsync(1000)
12341233
const updated1 = await pending
1235-
expect(updated1.failedDelivery()).toBeFalsy()
1236-
expect(makeReqSpy).toHaveBeenCalledTimes(2)
1234+
expect(updated1.failedDelivery()).toBeTruthy()
1235+
const err = updated1.failedDelivery()!.reason as Error
1236+
expect(err.message).toContain('Rate limit duration exceeded')
1237+
// Only 1 actual HTTP request — the initial 429
1238+
expect(makeReqSpy).toHaveBeenCalledTimes(1)
12371239
})
12381240
})

packages/node/src/plugins/segmentio/publisher.ts

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -262,12 +262,12 @@ export class Publisher {
262262
private _isRateLimited(): boolean {
263263
if (this._rateLimitedUntil === undefined) return false
264264

265-
// Check if maxRateLimitDuration has been exceeded
265+
// Check if maxRateLimitDuration has been exceeded.
266+
// Returns false so the caller can detect the cleared state and drop the batch.
266267
if (
267268
this._rateLimitStartTime !== undefined &&
268269
Date.now() - this._rateLimitStartTime >= this._maxRateLimitDuration * 1000
269270
) {
270-
// Clear rate-limit state; caller will drop batch
271271
this._rateLimitedUntil = undefined
272272
this._rateLimitStartTime = undefined
273273
return false
@@ -313,9 +313,8 @@ export class Publisher {
313313

314314
// eslint-disable-next-line no-constant-condition
315315
while (true) {
316-
totalAttempts++
317-
318316
// Check rate-limit state before making a request
317+
const wasRateLimited = this._rateLimitStartTime !== undefined
319318
if (this._isRateLimited()) {
320319
const untilRetryAfter = Math.max(
321320
0,
@@ -334,10 +333,12 @@ export class Publisher {
334333
continue
335334
}
336335

337-
// Check if maxRateLimitDuration was exceeded (cleared by _isRateLimited)
338-
// If we had a rateLimitStartTime but it got cleared due to duration,
339-
// and we're in a 429 requeue cycle, drop the batch
340-
// (This is handled by _isRateLimited returning false after clearing state)
336+
// If we were rate-limited but _isRateLimited() now returns false with
337+
// cleared state, maxRateLimitDuration was exceeded — drop the batch.
338+
if (wasRateLimited && this._rateLimitStartTime === undefined) {
339+
resolveFailedBatch(batch, new Error('Rate limit duration exceeded'))
340+
return
341+
}
341342

342343
let failureReason: unknown
343344
let shouldRetry = false
@@ -355,6 +356,8 @@ export class Publisher {
355356
}
356357
}
357358

359+
totalAttempts++
360+
358361
const headers: Record<string, string> = {
359362
'Content-Type': 'application/json',
360363
'User-Agent': 'analytics-node-next/latest',

0 commit comments

Comments (0)