Skip to content

Commit b268dca

Browse files
committed
add script to run all benchmarks
1 parent 360725d commit b268dca

File tree

1 file changed

+210
-0
lines changed

1 file changed

+210
-0
lines changed

run-all-benchmarks.sh

Lines changed: 210 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,210 @@
#!/bin/bash
#
# Script to run benchmarks for all Python SDK E2E instrumentation tests serially.
# Each instrumentation runs with BENCHMARKS=1, producing a comparison of
# SDK disabled vs enabled (RECORD mode) with Go-style ns/op stats.
#
# Usage:
#   ./run-all-benchmarks.sh                   # Run all, 10s per endpoint
#   ./run-all-benchmarks.sh -d 20             # Run all, 20s per endpoint
#   ./run-all-benchmarks.sh -f flask,fastapi  # Run only flask and fastapi
#   ./run-all-benchmarks.sh -h                # Show help

# Abort on unhandled errors; pipefail makes a pipeline fail when any
# stage fails (plain `set -e` ignores failures of non-final stages).
set -eo pipefail

# Tunables: environment variables supply defaults; CLI flags may override.
BENCHMARK_DURATION=${BENCHMARK_DURATION:-10} # seconds of timed loop per endpoint
BENCHMARK_WARMUP=${BENCHMARK_WARMUP:-3}      # seconds of warmup per endpoint
FILTER=""                                    # comma-separated names; empty = run all

# ANSI colors for terminal output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Print CLI help text to stdout.
# NOTE(review): column alignment was collapsed by extraction; realigned here.
usage() {
  cat <<EOF
Usage: $0 [OPTIONS]

Run SDK benchmarks for all (or selected) instrumentations serially.

Options:
  -d, --duration N   Seconds per endpoint for timed loop (default: 10)
  -w, --warmup N     Seconds of warmup per endpoint before timing (default: 3)
  -f, --filter LIST  Comma-separated list of instrumentations to benchmark
                     e.g. flask,fastapi,django
  -h, --help         Show this help message

Environment variables:
  BENCHMARK_DURATION  Same as --duration
  BENCHMARK_WARMUP    Same as --warmup
  TUSK_CLI_VERSION    CLI version to use in Docker builds

Examples:
  $0                   # Benchmark all instrumentations
  $0 -d 20             # 20s per endpoint
  $0 -d 30 -w 5        # 30s timed, 5s warmup
  $0 -f flask,fastapi  # Only benchmark flask and fastapi
EOF
}
# Parse command-line flags into the global configuration variables
# (BENCHMARK_DURATION, BENCHMARK_WARMUP, FILTER).
# Exits 1 with a message on a missing argument or unknown option;
# -h/--help prints usage and exits 0.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      -d|--duration)
        # ${2:-} guards against $2 being unset when the flag is last.
        if [[ -z "${2:-}" ]] || [[ "${2:-}" == -* ]]; then
          echo "Error: --duration requires a number argument" >&2
          exit 1
        fi
        BENCHMARK_DURATION="$2"
        shift 2
        ;;
      -w|--warmup)
        if [[ -z "${2:-}" ]] || [[ "${2:-}" == -* ]]; then
          echo "Error: --warmup requires a number argument" >&2
          exit 1
        fi
        BENCHMARK_WARMUP="$2"
        shift 2
        ;;
      -f|--filter)
        if [[ -z "${2:-}" ]] || [[ "${2:-}" == -* ]]; then
          echo "Error: --filter requires a comma-separated list" >&2
          exit 1
        fi
        FILTER="$2"
        shift 2
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        echo "Error: Unknown option $1" >&2
        usage
        exit 1
        ;;
    esac
  done
}

parse_args "$@"
# Validate numeric inputs (flags or environment may have supplied garbage).
# Duration must be >= 1 (a 0-second timed loop is meaningless and the
# original regex accepted it despite the "positive integer" message).
if ! [[ "$BENCHMARK_DURATION" =~ ^[1-9][0-9]*$ ]]; then
  echo "Error: --duration must be a positive integer" >&2
  exit 1
fi
# Warmup of 0 is allowed: it simply skips the warmup phase.
if ! [[ "$BENCHMARK_WARMUP" =~ ^[0-9]+$ ]]; then
  echo "Error: --warmup must be a non-negative integer" >&2
  exit 1
fi
# Resolve the SDK root: the directory containing this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Discover all e2e test run.sh scripts. NUL-delimited read so paths
# containing whitespace survive intact (the original ALL_SCRIPTS=($(find ...))
# word-split find's output — ShellCheck SC2207).
ALL_SCRIPTS=()
while IFS= read -r -d '' script; do
  ALL_SCRIPTS+=("$script")
done < <(find "$SCRIPT_DIR/drift/instrumentation" -path "*/e2e-tests/run.sh" -type f -print0 | sort -z)

if [ ${#ALL_SCRIPTS[@]} -eq 0 ]; then
  echo -e "${RED}No e2e test scripts found!${NC}"
  exit 1
fi
# Map ".../instrumentation/<name>/e2e-tests/run.sh" to "<name>".
# Parameter expansion replaces the original echo|sed pipeline; equivalent
# for every path produced by the discovery find pattern.
script_to_name() {
  local trimmed="${1%/e2e-tests/run.sh}"
  printf '%s\n' "${trimmed##*/}"
}

# Success iff $1 appears as an element of the comma-separated $FILTER list.
in_filter() {
  [[ ",$FILTER," == *",$1,"* ]]
}

# Build the worklist, applying the filter when one was provided.
RUN_SCRIPTS=()
RUN_NAMES=()
for script in "${ALL_SCRIPTS[@]}"; do
  NAME=$(script_to_name "$script")
  if [ -n "$FILTER" ] && ! in_filter "$NAME"; then
    continue # not selected by --filter
  fi
  RUN_SCRIPTS+=("$script")
  RUN_NAMES+=("$NAME")
done
NUM_TESTS=${#RUN_SCRIPTS[@]}

# Nothing matched the filter: show what is available and bail out.
if [ "$NUM_TESTS" -eq 0 ]; then
  echo -e "${RED}No matching instrumentations found for filter: $FILTER${NC}"
  echo "Available: $(printf '%s\n' "${ALL_SCRIPTS[@]}" | sed -E 's|.*/instrumentation/([^/]+)/e2e-tests/run.sh|\1|' | tr '\n' ' ')"
  exit 1
fi
# Banner: what is about to run and with which settings.
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Python SDK Benchmarks${NC}"
echo -e "${BLUE}========================================${NC}"
echo "Instrumentations: ${RUN_NAMES[*]}"
echo "Warmup per endpoint: ${BENCHMARK_WARMUP}s"
echo "Duration per endpoint: ${BENCHMARK_DURATION}s"
echo "Total instrumentations: $NUM_TESTS"
echo -e "${BLUE}========================================${NC}"
echo ""

# Export benchmark knobs so they pass through docker-compose to the apps
# under test; BENCHMARKS=1 switches each run.sh into benchmark mode.
export BENCHMARKS=1
export BENCHMARK_DURATION
export BENCHMARK_WARMUP
# Run each benchmark serially, recording per-instrumentation exit codes
# so the summary can report individual failures.
OVERALL_EXIT_CODE=0
declare -a EXIT_CODES=()

for i in "${!RUN_SCRIPTS[@]}"; do
  SCRIPT="${RUN_SCRIPTS[$i]}"
  NAME="${RUN_NAMES[$i]}"
  TEST_DIR=$(dirname "$SCRIPT")

  echo ""
  echo -e "${BLUE}============================================================${NC}"
  echo -e "${BLUE}[$((i + 1))/$NUM_TESTS] Benchmarking: $NAME${NC}"
  echo -e "${BLUE}============================================================${NC}"
  echo ""

  chmod +x "$SCRIPT"

  # Subshell keeps the cwd change contained; -e is suspended around the
  # call so one failing benchmark does not abort the remaining ones.
  set +e
  (cd "$TEST_DIR" && ./run.sh)
  EXIT_CODE=$?
  set -e

  EXIT_CODES+=("$EXIT_CODE")

  if [ "$EXIT_CODE" -ne 0 ]; then
    echo -e "${RED}Benchmark for $NAME failed with exit code $EXIT_CODE${NC}"
    OVERALL_EXIT_CODE=1
  fi

  echo ""
done
# Final summary: one line per instrumentation with a pass/fail mark,
# then an overall verdict. Exits with 0 only if every benchmark passed.
echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Benchmark Summary${NC}"
echo -e "${BLUE}========================================${NC}"

for i in "${!RUN_NAMES[@]}"; do
  NAME="${RUN_NAMES[$i]}"
  EXIT_CODE="${EXIT_CODES[$i]}"

  # NOTE(review): the original printed nothing between the color codes —
  # the ✓/✗ glyphs were presumably lost in transit; restored here.
  if [ "$EXIT_CODE" -eq 0 ]; then
    echo -e "${GREEN}✓${NC} $NAME"
  else
    echo -e "${RED}✗${NC} $NAME (exit code $EXIT_CODE)"
  fi
done

echo -e "${BLUE}========================================${NC}"

if [ "$OVERALL_EXIT_CODE" -eq 0 ]; then
  echo -e "${GREEN}All $NUM_TESTS benchmark(s) completed successfully.${NC}"
else
  echo -e "${RED}Some benchmarks failed.${NC}"
fi

echo ""
exit "$OVERALL_EXIT_CODE"

0 commit comments

Comments
 (0)