-
-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathbenchmark_hooks.py
More file actions
142 lines (122 loc) · 3.92 KB
/
benchmark_hooks.py
File metadata and controls
142 lines (122 loc) · 3.92 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#!/usr/bin/env python3
"""
Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.
Usage:
python benchmark_hooks.py
Requirements:
- pre-commit must be installed and available in PATH
- Two config files (must match the HOOKS list below):
- testing/benchmark_hook_1.yaml
- testing/benchmark_hook_2.yaml
- Target files: testing/main.c (or adjust as needed)
"""
import glob
import os
import statistics
import subprocess
import time
# Hook configurations to benchmark against one another.
HOOKS = [
    {
        "name": "cpp-linter-hooks",
        "config": "testing/benchmark_hook_1.yaml",
    },
    {
        "name": "mirrors-clang-format",
        "config": "testing/benchmark_hook_2.yaml",
    },
]
# All C sources in the cloned example repo (populated by git_clone()).
# NOTE: the former `recursive=True` flag was a no-op because the pattern
# contains no `**`; it has been removed for clarity.
TARGET_FILES = glob.glob("testing/test-examples/*.c")
# Number of timed runs per hook.
REPEATS = 5
# Where the summary table is persisted.
RESULTS_FILE = "testing/benchmark_results.txt"
def git_clone():
    """Fetch sample C programs into testing/test-examples (best effort).

    Skips the clone when the target directory already exists (a re-clone
    into a non-empty directory always fails), and ignores clone failures
    (e.g. no network) so the benchmark can still run against whatever
    files are already present.
    """
    target = "testing/test-examples"
    if os.path.isdir(target):
        # Already cloned on a previous run; nothing to do.
        return
    try:
        subprocess.run(
            [
                "git",
                "clone",
                "--depth",
                "1",
                "https://github.com/gouravthakur39/beginners-C-program-examples.git",
                target,
            ],
            check=True,
        )
    except subprocess.CalledProcessError:
        # Best effort: proceed with whatever files exist if the clone fails.
        pass
def run_hook(config, files):
    """Run `pre-commit` once with *config* on *files*; return elapsed seconds.

    A non-zero exit (hooks that reformat files fail the run) is ignored —
    the wall-clock measurement is still meaningful either way.
    """
    command = ["pre-commit", "run", "--config", config, "--files", *files]
    started = time.perf_counter()
    try:
        subprocess.run(
            command,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except subprocess.CalledProcessError:
        # Timing is recorded regardless of the hook's exit status.
        pass
    return time.perf_counter() - started
def safe_git_restore(files):
    """Revert formatting changes, touching only files that git tracks.

    Untracked files (e.g. freshly cloned examples) are skipped because
    `git restore` would error out on them.
    """
    tracked = [
        path
        for path in files
        if subprocess.run(
            ["git", "ls-files", "--error-unmatch", path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ).returncode == 0
    ]
    if tracked:
        subprocess.run(["git", "restore", *tracked])
def benchmark():
    """Time each configured hook REPEATS times.

    Returns:
        dict mapping hook name -> list of per-run durations in seconds.
    """
    results = {}
    for hook in HOOKS:
        name, config = hook["name"], hook["config"]
        print(f"\nBenchmarking {name}...")
        times = []
        for run_index in range(REPEATS):
            # Reset file contents and pre-commit caches so runs are comparable.
            safe_git_restore(TARGET_FILES)
            subprocess.run(["pre-commit", "clean"])
            elapsed = run_hook(config, TARGET_FILES)
            print(f" Run {run_index + 1}: {elapsed:.3f} seconds")
            times.append(elapsed)
        results[name] = times
    return results
def report(results):
    """Print a formatted summary table of timings and save it to RESULTS_FILE.

    Args:
        results: mapping of hook name -> list of per-run durations in seconds.
    """
    headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"]
    # Columns are at least 16 chars wide; the first is widened to fit the
    # longest hook name (formatted numbers never exceed 16 chars here).
    col_widths = [max(len(h), 16) for h in headers]
    for name in results:
        col_widths[0] = max(col_widths[0], len(name))

    # Build every output line exactly once, then both print and persist them
    # (the original duplicated the header/separator construction).
    lines = [
        " | ".join(h.ljust(w) for h, w in zip(headers, col_widths)),
        "-+-".join("-" * w for w in col_widths),
    ]
    for name, times in results.items():
        avg = statistics.mean(times)
        # stdev needs at least two samples; report 0.0 for a single run.
        std = statistics.stdev(times) if len(times) > 1 else 0.0
        row = [
            name.ljust(col_widths[0]),
            f"{avg:.3f}".ljust(col_widths[1]),
            f"{std:.3f}".ljust(col_widths[2]),
            f"{min(times):.3f}".ljust(col_widths[3]),
            f"{max(times):.3f}".ljust(col_widths[4]),
            str(len(times)).ljust(col_widths[5]),
        ]
        lines.append(" | ".join(row))

    print("\nBenchmark Results:\n")
    print("\n".join(lines))

    # Robustness fix: create the output directory if it does not exist,
    # otherwise open() raises FileNotFoundError.
    out_dir = os.path.dirname(RESULTS_FILE)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(RESULTS_FILE, "w") as f:
        f.write("\n".join(lines) + "\n")
    print(f"\nResults saved to {RESULTS_FILE}")
def main():
    """Entry point: fetch sample sources, benchmark each hook, report results."""
    git_clone()
    report(benchmark())


if __name__ == "__main__":
    main()