-
-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathbenchmark_hooks.py
More file actions
145 lines (127 loc) · 4.2 KB
/
benchmark_hooks.py
File metadata and controls
145 lines (127 loc) · 4.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
#!/usr/bin/env python3
"""
Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.

Each hook is run REPEATS times via ``pre-commit run --all-files`` against a
freshly cloned set of example C files; wall-clock timings are collected and
summarized as a text table, a results file, and (on CI) a GitHub Actions
step summary.

Usage:
    python benchmark_hooks.py

Requirements:
- pre-commit must be installed and available in PATH
- git must be available in PATH (the example sources are cloned)
- Two config files (paths relative to the cloned ``examples`` directory):
    - ../testing/benchmark_hook_1.yaml  (cpp-linter-hooks)
    - ../testing/benchmark_hook_2.yaml  (mirrors-clang-format)
"""
import os
import subprocess
import time
import statistics
# Hooks under comparison. ``config`` paths are relative to the ``examples``
# directory that benchmark() chdirs into before running pre-commit.
HOOKS = [
    {
        "name": "cpp-linter-hooks",
        "config": "../testing/benchmark_hook_1.yaml",
    },
    {
        "name": "mirrors-clang-format",
        "config": "../testing/benchmark_hook_2.yaml",
    },
]
# Number of timed runs per hook.
REPEATS = 5
# Output path, relative to the original working directory (report() chdirs
# back out of ``examples`` before writing it).
RESULTS_FILE = "testing/benchmark_results.txt"
def prepare_code():
    """Shallow-clone the example C sources into ./examples.

    Best-effort: if the directory already exists (e.g. from a previous run),
    the clone is skipped entirely. Any other clone failure (network error,
    bad URL) is reported instead of silently swallowed, but is still
    non-fatal — benchmark() will fail loudly if the sources are absent.
    """
    if os.path.isdir("examples"):
        # Reuse the existing checkout instead of letting `git clone` fail.
        return
    try:
        subprocess.run(
            [
                "git",
                "clone",
                "--depth",
                "1",
                "https://github.com/gouravthakur39/beginners-C-program-examples.git",
                "examples",
            ],
            check=True,
        )
    except subprocess.CalledProcessError as exc:
        # Keep the original best-effort behavior, but surface the failure.
        print(f"Warning: failed to clone example sources: {exc}")
def run_hook(config):
    """Time one ``pre-commit run --all-files`` invocation.

    Args:
        config: Path to the pre-commit config file to use.

    Returns:
        Elapsed wall-clock seconds. The timing is recorded even when the
        hook exits non-zero (formatters report "failure" whenever they
        modify files, which is expected here).
    """
    command = ["pre-commit", "run", "--config", config, "--all-files"]
    started = time.perf_counter()
    try:
        subprocess.run(command, check=True, capture_output=True)
    except subprocess.CalledProcessError:
        pass  # Non-zero exit still yields a valid timing sample.
    return time.perf_counter() - started
def benchmark():
    """Run every configured hook REPEATS times and collect timings.

    Clones the example sources and changes into the ``examples`` directory
    (and stays there — report() is responsible for chdir-ing back).

    Returns:
        Mapping of hook name -> list of per-run durations in seconds.
    """
    timings = {}
    prepare_code()
    os.chdir("examples")
    for hook in HOOKS:
        name = hook["name"]
        # Undo any formatting applied by the previously benchmarked hook.
        subprocess.run(["git", "restore", "."], check=True)
        print(f"\nBenchmarking {name}...")
        samples = []
        for attempt in range(REPEATS):
            # Drop cached hook environments so every run starts cold.
            subprocess.run(["pre-commit", "clean"])
            elapsed = run_hook(hook["config"])
            print(f" Run {attempt + 1}: {elapsed:.3f} seconds")
            samples.append(elapsed)
        timings[name] = samples
    return timings
def report(results):
    """Print a timing summary table, save it to RESULTS_FILE, and append a
    Markdown version to the GitHub Actions step summary when available.

    Args:
        results: Mapping of hook name -> list of per-run durations in
            seconds, as produced by benchmark().

    Side effects:
        Changes the working directory to the parent (undoing the
        chdir("examples") done in benchmark()) before writing RESULTS_FILE.
    """
    headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"]
    col_widths = [max(len(h), 16) for h in headers]
    # Widen the first column to fit the longest hook name.
    for name in results:
        col_widths[0] = max(col_widths[0], len(name))
    print("\nBenchmark Results:\n")
    # Print header
    header_row = " | ".join(h.ljust(w) for h, w in zip(headers, col_widths))
    print(header_row)
    print("-+-".join("-" * w for w in col_widths))
    # Compute each hook's statistics exactly once; reused for the console
    # table, the results file, and the Markdown summary below.
    stats = {name: _summary_stats(times) for name, times in results.items()}
    # Print rows
    lines = []
    for name, (avg, std, min_t, max_t, n_runs) in stats.items():
        row = [
            name.ljust(col_widths[0]),
            f"{avg:.3f}".ljust(col_widths[1]),
            f"{std:.3f}".ljust(col_widths[2]),
            f"{min_t:.3f}".ljust(col_widths[3]),
            f"{max_t:.3f}".ljust(col_widths[4]),
            str(n_runs).ljust(col_widths[5]),
        ]
        print(" | ".join(row))
        lines.append(" | ".join(row))
    # Save to file (path is relative to the original working directory).
    os.chdir("..")
    with open(RESULTS_FILE, "w") as f:
        f.write(header_row + "\n")
        f.write("-+-".join("-" * w for w in col_widths) + "\n")
        for line in lines:
            f.write(line + "\n")
    print(f"\nResults saved to {RESULTS_FILE}")
    # Write to GitHub Actions summary, if running under Actions.
    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
    if summary_path:
        with open(summary_path, "a") as f:
            f.write("## Benchmark Results\n\n")
            # Markdown table header
            md_header = "| " + " | ".join(headers) + " |\n"
            md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n"
            f.write(md_header)
            f.write(md_sep)
            for name, (avg, std, min_t, max_t, n_runs) in stats.items():
                f.write(
                    f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} "
                    f"| {max_t:.3f} | {n_runs} |\n"
                )
            f.write("\n")


def _summary_stats(times):
    """Return (mean, stdev, min, max, count) for a list of durations.

    stdev is 0.0 for a single sample, where statistics.stdev would raise.
    """
    std = statistics.stdev(times) if len(times) > 1 else 0.0
    return statistics.mean(times), std, min(times), max(times), len(times)
def main():
    """Script entry point: run the benchmark and emit all reports."""
    report(benchmark())


if __name__ == "__main__":
    main()