
Commit 792f247

Add benchmark results parsing script and update CI workflow to display summaries in GitHub Step Summary
1 parent 63f5076 · commit 792f247

2 files changed: 70 additions & 0 deletions

File tree

.github/workflows/ci.yml
benchmarks/BenchmarkResultsParser.py

.github/workflows/ci.yml (6 additions & 0 deletions)
@@ -65,6 +65,12 @@ jobs:
           arch: x86_64
           script: ./gradlew :benchmarks:connectedBenchmarkAndroidTest
 
+      - name: Parse Benchmark Results
+        if: always()
+        run: |
+          echo "### Macrobenchmark Results" >> $GITHUB_STEP_SUMMARY
+          python3 benchmarks/BenchmarkResultsParser.py >> $GITHUB_STEP_SUMMARY
+
       - name: Upload benchmark JSON
         if: always()
         uses: actions/upload-artifact@v4
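
$GITHUB_STEP_SUMMARY holds the path to a per-step markdown file that GitHub Actions renders on the workflow run's summary page, so the >> redirections above simply append the heading and the parser's stdout to that file. The same append can be done from Python when debugging locally; the sketch below is illustrative and not part of the commit (the write_summary helper and its stdout fallback are assumptions).

# Sketch (not in the commit): append markdown to the GitHub Step Summary
# from Python. GITHUB_STEP_SUMMARY is the file path GitHub Actions sets;
# falling back to stdout is a convenience for running outside CI.
import os

def write_summary(markdown: str) -> None:
    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
    if summary_path:
        with open(summary_path, "a") as summary:  # append, like >> in the shell step
            summary.write(markdown + "\n")
    else:
        print(markdown)

write_summary("### Macrobenchmark Results")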
benchmarks/BenchmarkResultsParser.py (64 additions & 0 deletions)
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+import json
+import glob
+import os
+
+def format_value(val):
+    if val is None:
+        return "-"
+    if isinstance(val, (int, float)):
+        return f"{val:.2f}"
+    return str(val)
+
+def main():
+    # Search for benchmark data files in the standard output directory
+    search_path = 'benchmarks/build/outputs/connected_android_test_additional_output/**/*-benchmarkData.json'
+    files = glob.glob(search_path, recursive=True)
+
+    if not files:
+        # Fallback to search from current directory
+        files = glob.glob('**/*-benchmarkData.json', recursive=True)
+
+    if not files:
+        print("No benchmark results found.")
+        return
+
+    print("| Metric | Min | Median | Max |")
+    print("| :--- | :---: | :---: | :---: |")
+
+    # Track metrics to avoid duplicates if multiple files are found
+    seen_results = set()
+
+    for file_path in files:
+        try:
+            with open(file_path, 'r') as f:
+                data = json.load(f)
+
+            if 'benchmarks' not in data:
+                continue
+
+            for benchmark in data['benchmarks']:
+                benchmark_name = benchmark.get('name', 'Unknown')
+                metrics = benchmark.get('metrics', {})
+
+                for metric_name, values in metrics.items():
+                    m_min = values.get('minimum')
+                    m_median = values.get('median')
+                    m_max = values.get('maximum')
+
+                    # Some metrics might be in nested objects depending on version
+                    # but usually minimum/median/maximum are at the top level of the metric object
+
+                    display_name = f"{benchmark_name}_{metric_name}"
+                    result_row = (display_name, m_min, m_median, m_max)
+
+                    if result_row not in seen_results:
+                        print(f"| {display_name} | {format_value(m_min)} | {format_value(m_median)} | {format_value(m_max)} |")
+                        seen_results.add(result_row)
+        except Exception as e:
+            # Print error to stderr so it doesn't mess up the markdown table on stdout
+            import sys
+            print(f"Error parsing {file_path}: {e}", file=sys.stderr)
+
+if __name__ == "__main__":
+    main()
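
The parser assumes the Jetpack Macrobenchmark output shape: a top-level "benchmarks" array whose entries carry a "metrics" map with "minimum", "median", and "maximum" keys. A quick local sanity check is to drop a file matching the fallback glob next to the script and run it; the snippet below is a hypothetical smoke test run from the repository root, with an invented benchmark name and invented timings.

# Hypothetical smoke test for BenchmarkResultsParser.py. The benchmark
# name and timings are invented; only the JSON shape reflects what the
# parser reads (benchmarks -> metrics -> minimum/median/maximum).
import json
import pathlib
import subprocess

sample = {
    "benchmarks": [
        {
            "name": "startupCompilationNone",
            "metrics": {
                "timeToInitialDisplayMs": {
                    "minimum": 231.5,
                    "median": 240.1,
                    "maximum": 255.0,
                }
            },
        }
    ]
}

# The file name matches the fallback glob '**/*-benchmarkData.json'.
tmp = pathlib.Path("local-benchmarkData.json")
tmp.write_text(json.dumps(sample))
try:
    subprocess.run(["python3", "benchmarks/BenchmarkResultsParser.py"], check=True)
finally:
    tmp.unlink()

With that input the script prints a row such as | startupCompilationNone_timeToInitialDisplayMs | 231.50 | 240.10 | 255.00 |.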
