Skip to content

Commit ff255cd

Browse files
committed
impr(CLDSRV-852): Remove unused workers params from interval calculation
1 parent dcd2cba commit ff255cd

File tree

5 files changed

+125
-138
lines changed

5 files changed

+125
-138
lines changed

lib/Config.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1873,7 +1873,7 @@ class Config extends EventEmitter {
18731873
assert(this.localCache, 'localCache must be defined when rate limiting is enabled');
18741874

18751875
// Parse and validate all rate limiting configuration
1876-
this.rateLimiting = parseRateLimitConfig(config.rateLimiting, this.clusters = this.clusters || 1);
1876+
this.rateLimiting = parseRateLimitConfig(config.rateLimiting);
18771877
}
18781878

18791879

lib/api/apiUtils/rateLimit/config.js

Lines changed: 11 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -199,11 +199,10 @@ const rateLimitConfigSchema = Joi.object({
199199
*
200200
* @param {string} resourceClass - Rate limit class name ('bucket' or 'account')
201201
* @param {object} validatedCfg - Already validated config from Joi
202-
* @param {number} clusters - Number of worker processes spawned per instance
203202
* @param {number} nodes - Number of instances that requests will be load balanced across
204203
* @returns {RateLimitClassConfig} Transformed rate limit config
205204
*/
206-
function transformClassConfig(resourceClass, validatedCfg, clusters, nodes) {
205+
function transformClassConfig(resourceClass, validatedCfg, nodes) {
207206
const transformed = {
208207
defaultConfig: undefined,
209208
configCacheTTL: validatedCfg.configCacheTTL,
@@ -213,23 +212,22 @@ function transformClassConfig(resourceClass, validatedCfg, clusters, nodes) {
213212
if (validatedCfg.defaultConfig?.requestsPerSecond) {
214213
const { limit, burstCapacity } = validatedCfg.defaultConfig.requestsPerSecond;
215214

216-
// Validate limit against nodes AND workers (business rule)
217-
const minLimit = nodes * clusters;
218-
if (limit > 0 && limit < minLimit) {
215+
// Validate limit against nodes (business rule)
216+
if (limit > 0 && limit < nodes) {
219217
throw new Error(
220218
`rateLimiting.${resourceClass}.defaultConfig.` +
221219
`requestsPerSecond.limit (${limit}) must be >= ` +
222-
`(nodes x workers = ${nodes} x ${clusters} = ${minLimit}) ` +
223-
'or 0 (unlimited). Each worker enforces limit/nodes/workers locally. ' +
224-
`With limit < ${minLimit}, per-worker rate would be < 1 req/s, effectively blocking traffic.`
220+
`nodes (${nodes}) ` +
221+
'or 0 (unlimited). Each node enforces limit/nodes locally. ' +
222+
`With limit < ${nodes}, per-node rate would be < 1 req/s, effectively blocking traffic.`
225223
);
226224
}
227225

228226
// Use provided burstCapacity or fall back to default
229227
const effectiveBurstCapacity = burstCapacity || transformed.defaultBurstCapacity;
230228

231-
// Calculate per-worker interval using distributed architecture
232-
const interval = calculateInterval(limit, nodes, clusters);
229+
// Calculate per-node interval using distributed architecture
230+
const interval = calculateInterval(limit, nodes);
233231

234232
// Store both the original limit and the calculated values
235233
transformed.defaultConfig = {
@@ -248,11 +246,10 @@ function transformClassConfig(resourceClass, validatedCfg, clusters, nodes) {
248246
* Parse and validate the complete rate limiting configuration
249247
*
250248
* @param {Object} rateLimitingConfig - config.rateLimiting object from config.json
251-
* @param {number} clusters - Number of worker clusters (must be numeric)
252249
* @returns {Object} Fully parsed and validated rate limiting configuration
253250
* @throws {Error} If configuration is invalid
254251
*/
255-
function parseRateLimitConfig(rateLimitingConfig, clusters) {
252+
function parseRateLimitConfig(rateLimitingConfig) {
256253
// Validate configuration using Joi schema
257254
const { error: validationError, value: validated } = rateLimitConfigSchema.validate(
258255
rateLimitingConfig,
@@ -282,8 +279,8 @@ function parseRateLimitConfig(rateLimitingConfig, clusters) {
282279
),
283280
};
284281

285-
parsed.bucket = transformClassConfig('bucket', validated.bucket, clusters, parsed.nodes);
286-
parsed.account = transformClassConfig('account', validated.account, clusters, parsed.nodes);
282+
parsed.bucket = transformClassConfig('bucket', validated.bucket, parsed.nodes);
283+
parsed.account = transformClassConfig('account', validated.account, parsed.nodes);
287284

288285
return parsed;
289286
}

lib/api/apiUtils/rateLimit/gcra.js

Lines changed: 13 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,38 +1,35 @@
11
/**
2-
* Calculate per-worker interval based on distributed architecture
2+
* Calculate per-node interval based on distributed architecture
33
*
4-
* In a distributed setup with N nodes and W workers per node:
4+
* In a distributed setup with N nodes:
55
* - Global limit: R requests per second
6-
* - Per-worker limit: R / N / W
7-
* - Interval = 1000ms / (R / N / W)
6+
* - Per-node limit: R / N
7+
* - Interval = 1000ms / (R / N)
88
*
99
* The interval represents milliseconds between requests. We divide 1000 (milliseconds
1010
* in a second) by the rate to convert "requests per second" to "milliseconds per request".
1111
*
1212
* Examples:
13-
* - 100 req/s ÷ 1 node ÷ 10 workers = 10 req/s per worker → interval = 100ms
14-
* - 600 req/s ÷ 6 nodes ÷ 10 workers = 10 req/s per worker → interval = 100ms
13+
* - 100 req/s ÷ 1 node = 100 req/s per node → interval = 100ms
14+
* - 600 req/s ÷ 6 nodes = 100 req/s per node → interval = 100ms
1515
*
1616
* Dynamic work-stealing is achieved through Redis sync reconciliation:
17-
* - Each worker evaluates locally at its fixed per-worker quota
18-
* - Workers report consumed / workers to Redis
19-
* - Redis sums all workers' shares
20-
* - Workers overwrite local counters with Redis values
17+
* - Each worker evaluates locally using preallocated tokens
18+
* - Workers report processed requests to Redis
19+
* - Redis sums all workers' requests
2120
* - Idle workers' unused capacity accumulates in Redis
2221
* - Busy workers pull back higher emptyAt values and throttle proportionally
2322
*
24-
* IMPORTANT: Limit must be >= N * W, otherwise per-worker rate < 1 req/s
23+
* IMPORTANT: Limit must be >= N, otherwise per-node rate < 1 req/s
2524
* which results in intervals > 1000ms and effectively blocks traffic.
2625
*
2726
* @param {number} limit - Global requests per second
2827
* @param {number} nodes - Total number of nodes
29-
* @param {number} _workers - Number of workers per node (unused in token reservation)
3028
* @returns {number} Interval in milliseconds between requests
3129
*/
32-
// eslint-disable-next-line no-unused-vars
33-
function calculateInterval(limit, nodes, _workers) {
34-
// Per-node rate = limit / nodes (workers NOT divided)
35-
// This allows dynamic work-stealing - workers evaluate at node quota
30+
31+
function calculateInterval(limit, nodes) {
32+
// Per-node rate = limit / nodes
3633
const perNodeRate = limit / nodes;
3734

3835
// Interval = 1000ms / rate

0 commit comments

Comments (0)