Skip to content

Commit 9e85110

Browse files
aepfli and claude authored
perf: add C7-C10 high-concurrency benchmarks (16 threads) (#99)
## Summary - Add C7-C10 (16-thread) concurrency benchmarks to the Rust criterion suite in `benches/concurrency.rs` - **C7**: 16 threads evaluating a simple static flag -- measures throughput saturation point - **C8**: 16 threads evaluating a flag with targeting rules -- heavy concurrent rule evaluation - **C9**: 16 threads with mixed workload (static + targeting + disabled flags) -- realistic high-load production mix - **C10**: 15 reader threads + 1 writer thread updating state -- read/write contention under heavy parallel load These complement the existing C1-C6 benchmarks (1/4/8 threads) as defined in BENCHMARKS.md, measuring mutex contention overhead and the scalability ceiling at higher parallelism. Java concurrency JMH benchmarks are deferred -- no dedicated concurrency benchmark file exists yet in the Java module. Closes #89 ## Test plan - [x] `cargo bench --bench concurrency -- --test` passes all 10 benchmarks (C1-C10) - [x] `cargo fmt && cargo clippy -- -D warnings` clean 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
1 parent a78af74 commit 9e85110

2 files changed

Lines changed: 412 additions & 0 deletions

File tree

benches/concurrency.rs

Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,6 +264,146 @@ fn concurrent_read_write_4t(c: &mut Criterion) {
264264
});
265265
}
266266

267+
// ---------------------------------------------------------------------------
268+
// C7: 16 threads evaluating simple flag
269+
// ---------------------------------------------------------------------------
270+
271+
/// C7: 16 threads concurrently evaluating a simple (static) flag.
272+
///
273+
/// Tests throughput saturation — at 16 threads the mutex contention dominates,
274+
/// revealing the scalability ceiling of the current locking strategy.
275+
fn concurrent_simple_16t(c: &mut Criterion) {
276+
let evaluator = make_evaluator();
277+
278+
c.bench_function("concurrent_simple_16t", |b| {
279+
b.iter(|| {
280+
let handles: Vec<_> = (0..16)
281+
.map(|_| {
282+
let eval = Arc::clone(&evaluator);
283+
thread::spawn(move || {
284+
let ctx = json!({});
285+
let guard = eval.lock().unwrap();
286+
guard.evaluate_flag(black_box("boolFlag"), black_box(&ctx))
287+
})
288+
})
289+
.collect();
290+
for h in handles {
291+
h.join().unwrap();
292+
}
293+
})
294+
});
295+
}
296+
297+
// ---------------------------------------------------------------------------
298+
// C8: 16 threads evaluating targeting flag
299+
// ---------------------------------------------------------------------------
300+
301+
/// C8: 16 threads concurrently evaluating a flag with targeting rules.
302+
///
303+
/// Combines heavy mutex contention with per-evaluation rule processing,
304+
/// measuring how targeting overhead compounds under high parallelism.
305+
fn concurrent_targeting_16t(c: &mut Criterion) {
306+
let evaluator = make_evaluator();
307+
308+
c.bench_function("concurrent_targeting_16t", |b| {
309+
b.iter(|| {
310+
let handles: Vec<_> = (0..16)
311+
.map(|i| {
312+
let eval = Arc::clone(&evaluator);
313+
thread::spawn(move || {
314+
let role = if i % 2 == 0 { "admin" } else { "viewer" };
315+
let ctx = json!({"role": role});
316+
let guard = eval.lock().unwrap();
317+
guard.evaluate_flag(black_box("targetedFlag"), black_box(&ctx))
318+
})
319+
})
320+
.collect();
321+
for h in handles {
322+
h.join().unwrap();
323+
}
324+
})
325+
});
326+
}
327+
328+
// ---------------------------------------------------------------------------
329+
// C9: Mixed workload - 16 threads with simple/targeting/disabled flags
330+
// ---------------------------------------------------------------------------
331+
332+
/// C9: 16 threads with mixed workload (simple, targeting, and disabled flags).
333+
///
334+
/// Simulates a realistic high-load production scenario where threads evaluate
335+
/// different flag types concurrently. The workload distribution cycles through
336+
/// static, targeting, and disabled flags across all 16 threads.
337+
fn concurrent_mixed_16t(c: &mut Criterion) {
338+
let evaluator = make_evaluator();
339+
340+
// Cycle through workload types across 16 threads:
341+
// simple, targeting(admin), disabled, targeting(viewer), repeat...
342+
let workload_defs: Vec<(&str, Value)> = vec![
343+
("boolFlag", json!({})),
344+
("targetedFlag", json!({"role": "admin"})),
345+
("disabledFlag", json!({})),
346+
("targetedFlag", json!({"role": "viewer"})),
347+
];
348+
349+
c.bench_function("concurrent_mixed_16t", |b| {
350+
b.iter(|| {
351+
let handles: Vec<_> = (0..16)
352+
.map(|i| {
353+
let eval = Arc::clone(&evaluator);
354+
let (key, ctx) = workload_defs[i % workload_defs.len()].clone();
355+
thread::spawn(move || {
356+
let guard = eval.lock().unwrap();
357+
guard.evaluate_flag(black_box(key), black_box(&ctx))
358+
})
359+
})
360+
.collect();
361+
for h in handles {
362+
h.join().unwrap();
363+
}
364+
})
365+
});
366+
}
367+
368+
// ---------------------------------------------------------------------------
369+
// C10: Read/write contention - 15 readers + 1 writer (16 threads total)
370+
// ---------------------------------------------------------------------------
371+
372+
/// C10: Read/write contention at 16 threads — 15 evaluating while 1 updates state.
373+
///
374+
/// The writer thread alternates between two configurations on each iteration,
375+
/// simulating periodic config refreshes under heavy parallel evaluation load.
376+
/// This measures contention between readers and a writer at high thread counts.
377+
fn concurrent_read_write_16t(c: &mut Criterion) {
378+
let evaluator = make_evaluator();
379+
380+
c.bench_function("concurrent_read_write_16t", |b| {
381+
b.iter(|| {
382+
let eval_writer = Arc::clone(&evaluator);
383+
let writer = thread::spawn(move || {
384+
let mut guard = eval_writer.lock().unwrap();
385+
guard.update_state(black_box(BENCH_CONFIG_ALT)).unwrap();
386+
});
387+
388+
let readers: Vec<_> = (0..15)
389+
.map(|_| {
390+
let eval = Arc::clone(&evaluator);
391+
thread::spawn(move || {
392+
let ctx = json!({});
393+
let guard = eval.lock().unwrap();
394+
guard.evaluate_flag(black_box("boolFlag"), black_box(&ctx))
395+
})
396+
})
397+
.collect();
398+
399+
writer.join().unwrap();
400+
for h in readers {
401+
h.join().unwrap();
402+
}
403+
})
404+
});
405+
}
406+
267407
criterion_group!(
268408
benches,
269409
concurrent_simple_1t,
@@ -272,5 +412,9 @@ criterion_group!(
272412
concurrent_targeting_4t,
273413
concurrent_mixed_4t,
274414
concurrent_read_write_4t,
415+
concurrent_simple_16t,
416+
concurrent_targeting_16t,
417+
concurrent_mixed_16t,
418+
concurrent_read_write_16t,
275419
);
276420
criterion_main!(benches);

0 commit comments

Comments
 (0)