Skip to content

Commit 5d20550

Browse files
committed
Fix
1 parent bcd4a06 commit 5d20550

4 files changed

Lines changed: 9 additions & 5 deletions

File tree

.github/workflows/benchmarks.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,13 +59,17 @@ jobs:
 59  59
 60  60         - name: Run realistic workload benchmark
 61  61           id: realistic
     62  +       env:
     63  +         BENCHMARK_ITERATIONS: ${{ inputs.iterations }}
 62  64           run: |
 63  65             uv run python benchmarks/bench/realistic_workload.py 2>&1 | tee realistic_output.txt
 64  66             # Extract just the results JSON
 65  67             cat benchmarks/results/realistic-workload.json
 66  68
 67  69         - name: Run fixed QPS latency benchmark
 68  70           id: fixed_qps
     71  +       env:
     72  +         BENCHMARK_QPS_DURATION: ${{ inputs.qps_duration }}
 69  73           run: |
 70  74             uv run python benchmarks/bench/fixed_qps_latency.py 2>&1 | tee fixed_qps_output.txt
 71  75             # Extract just the results JSON
# Extract just the results JSON

benchmarks/bench/fixed_qps_latency.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -155,9 +155,9 @@ def main():
155 155       print("Endpoint: POST /api/realistic (~10-15ms baseline)")
156 156       print()
157 157
158     -     # Test at multiple QPS levels
    158 +     # Test at multiple QPS levels (duration configurable via BENCHMARK_QPS_DURATION env var)
159 159       qps_levels = [25, 50, 75]  # Requests per second
160     -     duration = 10  # seconds per test
    160 +     duration = int(os.environ.get("BENCHMARK_QPS_DURATION", "10"))  # seconds per test
161 161
162 162       results = {"baseline": {}, "sdk_100": {}, "sdk_10": {}}
163163

benchmarks/bench/realistic_workload.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,8 +70,8 @@ def run_benchmark_subprocess(mode: str, sampling_rate: float = 1.0) -> dict | No
 70  70       session.post(f"{{server_url}}/api/typical-write", json={{"name": "test"}})
 71  71       session.post(f"{{server_url}}/api/realistic", json={{"userId": "u1", "query": "test"}})
 72  72
 73      -    # Benchmark parameters
 74      -    iterations = 200
     73  +    # Benchmark parameters (configurable via BENCHMARK_ITERATIONS env var)
     74  +    iterations = int(os.environ.get("BENCHMARK_ITERATIONS", "200"))
 75  75       results = {{}}
 76  76
 77  77       # Test 1: Typical Read (~5-10ms baseline)

benchmarks/scripts/compare_runs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
  3   3
  4   4   Usage:
  5   5       python compare_runs.py results1.json results2.json
  6      -    python compare_runs.py --download RUN_ID1 RUN_ID2  # Download from GitHub Actions
      6  +    python compare_runs.py --json results1.json results2.json  # JSON output only
  7   7   """
  8   8
  9   9   import argparse

0 commit comments

Comments (0)