|
| 1 | +#!/usr/bin/env python |
| 2 | +"""Benchmark script for arm_pytorch_utilities torch.compile optimization. |
| 3 | +
|
| 4 | +Measures eager-mode and torch.compile performance for refactor-target functions. |
| 5 | +Outputs a printed table and JSON file for before/after comparison. |
| 6 | +
|
| 7 | +Usage: |
| 8 | + python benchmarks/bench_compile.py --device cpu |
| 9 | + python benchmarks/bench_compile.py --device cuda |
| 10 | +""" |
| 11 | +import argparse |
| 12 | +import json |
| 13 | +import time |
| 14 | +from datetime import datetime |
| 15 | +from pathlib import Path |
| 16 | + |
| 17 | +import torch |
| 18 | + |
| 19 | +from arm_pytorch_utilities import math_utils, linalg, tensor_utils, preprocess, softknn |
| 20 | + |
| 21 | + |
def bench(fn, *args, warmup=5, repeats=20, device='cpu'):
    """Time ``fn(*args)`` in eager mode and return the median runtime in ms.

    Runs *warmup* untimed calls first, then *repeats* timed calls. On CUDA,
    synchronizes around each timed call so kernel launch asynchrony does not
    skew the measurement.
    """
    needs_sync = device == 'cuda'

    # Untimed warmup so caches/allocators settle before measurement.
    for _ in range(warmup):
        fn(*args)
    if needs_sync:
        torch.cuda.synchronize()

    samples = []
    for _ in range(repeats):
        if needs_sync:
            torch.cuda.synchronize()
        start = time.perf_counter()
        fn(*args)
        if needs_sync:
            torch.cuda.synchronize()
        samples.append((time.perf_counter() - start) * 1000)

    # Median is robust to one-off scheduler hiccups.
    return sorted(samples)[len(samples) // 2]
| 42 | + |
| 43 | + |
def try_compile_bench(fn, *args, device='cpu', warmup=5, repeats=20):
    """Compile *fn* with ``torch.compile`` and benchmark the compiled version.

    Always returns a 3-tuple ``(median_ms, success, error)``:

    * ``(median_ms, True, None)`` when compilation and execution succeed;
    * ``(None, False, str(exception))`` on any failure.

    The original implementation returned a 2-tuple on success and a 3-tuple
    on failure; the arity is now uniform so callers can unpack directly.
    Compilation cost is absorbed by the warmup calls, so the reported median
    reflects steady-state compiled performance.
    """
    try:
        compiled_fn = torch.compile(fn, fullgraph=True)
        # Warmup includes compilation: the first call triggers tracing/codegen.
        for _ in range(warmup):
            compiled_fn(*args)
        if device == 'cuda':
            torch.cuda.synchronize()

        times = []
        for _ in range(repeats):
            if device == 'cuda':
                torch.cuda.synchronize()
            t0 = time.perf_counter()
            compiled_fn(*args)
            if device == 'cuda':
                torch.cuda.synchronize()
            t1 = time.perf_counter()
            times.append((t1 - t0) * 1000)

        times.sort()
        return times[len(times) // 2], True, None
    except Exception as e:
        # torch.compile can fail for many reasons (unsupported ops, graph
        # breaks under fullgraph=True, missing backend); report, don't raise.
        return None, False, str(e)
| 69 | + |
| 70 | + |
def make_psd(n, device):
    """Return a random symmetric positive-definite ``n x n`` matrix on *device*.

    Built as R^T R (PSD by construction) plus a small ridge on the diagonal to
    guarantee strict positive definiteness.
    """
    A = torch.randn(n, n, device=device)
    return A.T @ A + 0.1 * torch.eye(n, device=device)
| 74 | + |
| 75 | + |
def run_benchmarks(device_str):
    """Run eager and torch.compile benchmarks for every target function.

    Prints a formatted comparison table and returns a dict mapping benchmark
    name -> {'eager_ms', 'compile_ms', 'compile_ok', 'compile_error'}
    (plus 'eager_error' when the eager run itself failed).

    Fix vs. the previous version: in-place functions (needs_clone=True) are
    now wrapped in a cloning closure for BOTH the eager and the compiled
    benchmark. Previously the compile path ran the raw function on the shared
    stored tensor, so the first warmup call destroyed the NaNs and all later
    iterations measured a different (cheaper) workload than the eager path.
    """
    device = torch.device(device_str)
    results = {}

    # name -> (callable, args, needs_clone); needs_clone marks functions that
    # mutate their first argument, which must therefore be re-cloned per call.
    benchmarks = {}

    # --- Setup inputs ---
    # replace_nan_and_inf (in-place)
    x_nan = torch.randn(10000, 100, device=device)
    mask = torch.rand_like(x_nan) < 0.1
    x_nan[mask] = float('nan')
    benchmarks['replace_nan_and_inf'] = (math_utils.replace_nan_and_inf, (x_nan.clone(), 0), True)

    # angular_diff_batch
    a_ang = torch.randn(100000, device=device)
    b_ang = torch.randn(100000, device=device)
    benchmarks['angular_diff_batch'] = (math_utils.angular_diff_batch, (a_ang, b_ang), False)

    # angle_between_stable
    u_abs = torch.randn(200, 50, device=device)
    v_abs = torch.randn(150, 50, device=device)
    benchmarks['angle_between_stable'] = (math_utils.angle_between_stable, (u_abs, v_abs), False)

    # cos_sim_pairwise
    x1_cos = torch.randn(500, 50, device=device)
    x2_cos = torch.randn(300, 50, device=device)
    benchmarks['cos_sim_pairwise'] = (math_utils.cos_sim_pairwise, (x1_cos, x2_cos), False)

    # batch_batch_product
    X_bbp = torch.randn(10000, 20, device=device)
    A_bbp = torch.randn(10000, 20, 20, device=device)
    benchmarks['batch_batch_product'] = (linalg.batch_batch_product, (X_bbp, A_bbp), False)

    # batch_quadratic_product
    X_bqp = torch.randn(10000, 20, device=device)
    A_bqp = make_psd(20, device)
    benchmarks['batch_quadratic_product'] = (linalg.batch_quadratic_product, (X_bqp, A_bqp), False)

    # batch_outer_product
    u_bop = torch.randn(10000, 20, device=device)
    v_bop = torch.randn(10000, 20, device=device)
    benchmarks['batch_outer_product'] = (linalg.batch_outer_product, (u_bop, v_bop), False)

    # squeeze_n
    x_sq = torch.randn(1, 1, 1, 1000, 50, device=device)
    benchmarks['squeeze_n'] = (tensor_utils.squeeze_n, (x_sq, 3), False)

    # MinMaxScaler.transform
    x_mm = torch.randn(10000, 50, device=device)
    scaler = preprocess.MinMaxScaler()
    scaler.fit(x_mm)
    benchmarks['MinMaxScaler.transform'] = (scaler.transform, (x_mm,), False)

    # SoftKNN.forward
    x_knn = torch.randn(200, 10, device=device)
    knn = softknn.SoftKNN(min_k=20)
    benchmarks['SoftKNN.forward'] = (knn, (x_knn,), False)

    # sqrtm (CPU only due to .numpy())
    if device_str == 'cpu':
        A_sqrtm = make_psd(50, device)
        benchmarks['sqrtm'] = (linalg.sqrtm, (A_sqrtm,), False)

    # --- Run benchmarks ---
    print(f"\n{'Function':<30} {'Eager (ms)':>12} {'Compile (ms)':>14} {'Speedup':>10} {'Compile OK':>12}")
    print("-" * 80)

    for name, (fn, args, needs_clone) in benchmarks.items():
        if needs_clone:
            # One wrapper shared by eager and compile paths so both measure
            # the identical workload (fresh clone mutated on every call).
            template = args[0]
            rest_args = args[1:]

            def timed_fn(_fn=fn, _tpl=template, _rest=rest_args):
                return _fn(_tpl.clone(), *_rest)

            timed_args = ()
        else:
            timed_fn = fn
            timed_args = args

        # Eager benchmark (try/except now also covers the cloning path)
        try:
            eager_ms = bench(timed_fn, *timed_args, warmup=5, repeats=20, device=device_str)
        except Exception as e:
            print(f"{name:<30} {'ERROR':>12} {'N/A':>14} {'N/A':>10} {'N/A':>12} ({e})")
            results[name] = {'eager_ms': None, 'compile_ms': None, 'compile_ok': False,
                             'compile_error': None, 'eager_error': str(e)}
            continue

        # Compile benchmark: same wrapper as eager for an apples-to-apples
        # comparison.
        compile_result = try_compile_bench(timed_fn, *timed_args, device=device_str)

        # Accept either a (ms, ok) or (ms, ok, error) return shape.
        if len(compile_result) == 2:
            compile_ms, compile_ok = compile_result
            compile_err = None
        else:
            compile_ms, compile_ok, compile_err = compile_result

        speedup = f"{eager_ms / compile_ms:.2f}x" if compile_ms else "N/A"
        compile_str = f"{compile_ms:.3f}" if compile_ms else "FAIL"

        print(f"{name:<30} {eager_ms:>12.3f} {compile_str:>14} {speedup:>10} {'yes' if compile_ok else 'no':>12}")

        results[name] = {
            'eager_ms': eager_ms,
            'compile_ms': compile_ms,
            'compile_ok': compile_ok,
            'compile_error': compile_err,
        }

    return results
| 185 | + |
| 186 | + |
def main():
    """CLI entry point: parse args, run the suite, and dump results to JSON."""
    parser = argparse.ArgumentParser(description='Benchmark arm_pytorch_utilities functions')
    parser.add_argument('--device', choices=['cpu', 'cuda'], default='cpu')
    args = parser.parse_args()

    # Degrade gracefully when CUDA was requested but is not present.
    if args.device == 'cuda' and not torch.cuda.is_available():
        print("CUDA not available, falling back to CPU")
        args.device = 'cpu'

    print(f"Running benchmarks on {args.device}")
    print(f"PyTorch version: {torch.__version__}")

    results = run_benchmarks(args.device)

    # Persist next to this script, timestamped so runs never overwrite.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    output_file = Path(__file__).parent / f"results_{args.device}_{timestamp}.json"
    payload = {
        'device': args.device,
        'torch_version': torch.__version__,
        'timestamp': timestamp,
        'results': results,
    }
    output_file.write_text(json.dumps(payload, indent=2))
    print(f"\nResults saved to {output_file}")
| 214 | + |
| 215 | + |
# Entry-point guard: run the benchmark CLI only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
0 commit comments