#!/usr/bin/env python
"""
Measure and compare peak memory using pytest-memray.

Usage:
    # Save a baseline (on master)
    python benchmarks/memory.py save master

    # Save current branch
    python benchmarks/memory.py save my-feature

    # Compare two saved runs
    python benchmarks/memory.py compare master my-feature

    # Quick mode (smaller sizes)
    python benchmarks/memory.py save master --quick

Results are stored in .benchmarks/memory/.
"""

from __future__ import annotations

import argparse
import json
import platform
import re
import subprocess
import sys
from pathlib import Path

if platform.system() == "Windows":
    raise RuntimeError(
        "memory.py requires pytest-memray which is not available on Windows. "
        "Run memory benchmarks on Linux or macOS."
    )

RESULTS_DIR = Path(".benchmarks/memory")
MEMORY_RE = re.compile(
    r"Allocation results for (.+?) at the high watermark\s+"
    r"📦 Total memory allocated: ([\d.]+)(MiB|KiB|GiB|B)",
)
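# The regex above targets pytest-memray's per-test report, which looks roughly
# like the block below (test name is illustrative; exact formatting may differ
# between pytest-memray versions):
#
#   Allocation results for test_build[small] at the high watermark
#
#       📦 Total memory allocated: 123.4MiB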
# Only the build phase is measured by default. Unlike timing benchmarks (where
# pytest-benchmark isolates the measured function), memray tracks all allocations
# within a test — including model construction in setup. This means LP write and
# matrix tests would report build + phase memory combined, making the phase-specific
# contribution hard to isolate. Since model construction dominates memory usage,
# measuring build alone gives the most accurate and actionable numbers.
DEFAULT_TEST_PATHS = [
    "benchmarks/test_build.py",
]


def _to_mib(value: float, unit: str) -> float:
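    # Normalise a memray-reported value to MiB, e.g. _to_mib(2048, "KiB") == 2.0.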
    factors = {"B": 1 / 1048576, "KiB": 1 / 1024, "MiB": 1, "GiB": 1024}
    return value * factors[unit]


def _collect_test_ids(test_paths: list[str], quick: bool) -> list[str]:
    """Collect test IDs without running them."""
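    # `pytest --collect-only -q` prints one node ID per line, e.g.
    # "benchmarks/test_build.py::test_build[small]" (IDs here are illustrative),
    # followed by a summary line; the "::" filter below keeps only the node IDs.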
    cmd = [
        sys.executable,
        "-m",
        "pytest",
        *test_paths,
        "--collect-only",
        "-q",
    ]
    if quick:
        cmd.append("--quick")
    result = subprocess.run(cmd, capture_output=True, text=True)
    return [
        line.strip()
        for line in result.stdout.splitlines()
        if "::" in line and not line.startswith(("=", "-", " "))
    ]


def save(label: str, quick: bool = False, test_paths: list[str] | None = None) -> Path:
    """Run each benchmark in a separate process for accurate memory measurement."""
    if test_paths is None:
        test_paths = DEFAULT_TEST_PATHS
    test_ids = _collect_test_ids(test_paths, quick)
    if not test_ids:
        print("No tests collected.", file=sys.stderr)
        sys.exit(1)

    print(f"Running {len(test_ids)} tests (each in a separate process)...")
    entries = {}
    for i, test_id in enumerate(test_ids, 1):
        short = test_id.split("::")[-1]
        print(f" [{i}/{len(test_ids)}] {short}...", end=" ", flush=True)

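        # --memray enables pytest-memray's per-test allocation tracking;
        # --benchmark-disable tells pytest-benchmark to call the benchmarked
        # function once instead of running its timing loops.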
        cmd = [
            sys.executable,
            "-m",
            "pytest",
            test_id,
            "--memray",
            "--benchmark-disable",
            "-v",
            "--tb=short",
            "-q",
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        output = result.stdout + result.stderr

        match = MEMORY_RE.search(output)
        if match:
            value = float(match.group(2))
            unit = match.group(3)
            mib = round(_to_mib(value, unit), 3)
            entries[test_id] = mib
            print(f"{mib:.1f} MiB")
        elif "SKIPPED" in output or "skipped" in output:
            print("skipped")
        else:
            print(
                "WARNING: no memray data (pytest-memray output format may have changed)",
                file=sys.stderr,
            )

    if not entries:
        print("No memray results found. Is pytest-memray installed?", file=sys.stderr)
        sys.exit(1)

    RESULTS_DIR.mkdir(parents=True, exist_ok=True)
    out_path = RESULTS_DIR / f"{label}.json"
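    # Resulting file shape (test IDs and values shown are illustrative):
    #   {"label": "master",
    #    "peak_mib": {"benchmarks/test_build.py::test_build[small]": 12.3, ...}}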
    out_path.write_text(json.dumps({"label": label, "peak_mib": entries}, indent=2))
    print(f"\nSaved {len(entries)} results to {out_path}")
    return out_path


def compare(label_a: str, label_b: str) -> None:
    """Compare two saved memory results."""
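    # Prints a plain-text table, roughly (values illustrative):
    #   Test                 master  my-feature      Change
    #   test_build[small]      12.3        11.8       -4.1%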
    path_a = RESULTS_DIR / f"{label_a}.json"
    path_b = RESULTS_DIR / f"{label_b}.json"
    for p in (path_a, path_b):
        if not p.exists():
            print(f"Not found: {p}. Run 'save {p.stem}' first.", file=sys.stderr)
            sys.exit(1)

    data_a = json.loads(path_a.read_text())["peak_mib"]
    data_b = json.loads(path_b.read_text())["peak_mib"]

    all_tests = sorted(set(data_a) | set(data_b))

    print(f"\n{'Test':<60} {label_a:>10} {label_b:>10} {'Change':>10}")
    print("-" * 94)

    for test in all_tests:
        a = data_a.get(test)
        b = data_b.get(test)
        a_str = f"{a:.1f}" if a is not None else "—"
        b_str = f"{b:.1f}" if b is not None else "—"
        if a is not None and b is not None and a > 0:
            pct = (b - a) / a * 100
            change = f"{pct:+.1f}%"
        else:
            change = "—"
        # Shorten test name for readability
        short = test.split("::")[-1] if "::" in test else test
        print(f"{short:<60} {a_str:>10} {b_str:>10} {change:>10}")

    print()


def main():
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    sub = parser.add_subparsers(dest="cmd", required=True)

    p_save = sub.add_parser("save", help="Run benchmarks and save memory results")
    p_save.add_argument(
        "label", help="Label for this run (e.g. 'master', 'my-feature')"
    )
    p_save.add_argument(
        "--quick", action="store_true", help="Use smaller problem sizes"
    )
    p_save.add_argument(
        "--test-path",
        nargs="+",
        default=None,
        help="Test file(s) to run (default: build benchmarks only)",
    )

    p_cmp = sub.add_parser("compare", help="Compare two saved runs")
    p_cmp.add_argument("label_a", help="First run label (baseline)")
    p_cmp.add_argument("label_b", help="Second run label")

    args = parser.parse_args()
    if args.cmd == "save":
        save(args.label, quick=args.quick, test_paths=args.test_path)
    elif args.cmd == "compare":
        compare(args.label_a, args.label_b)


if __name__ == "__main__":
    main()