#!/usr/bin/env python3
import subprocess
import re
import numpy as np
import os

# Configuration
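# NOTE: MODEL_PATH and INDEX_PATH below point at a local Replay install on
# one specific machine; adjust them for your own environment before running.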
INPUT_AUDIO = "TestAudio/coder_audio_stock.wav"
MODEL_PATH = "/Users/mcruz/Library/Application Support/Replay/com.replay.Replay/models/Slim Shady/model.pth"
INDEX_PATH = "/Users/mcruz/Library/Application Support/Replay/com.replay.Replay/models/Slim Shady/model.index"
OUTPUT_DIR = "TestAudio"
NUM_RUNS = 3

def run_inference(backend):
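    """Run one rvc_cli.py inference with the given backend.

    Returns the conversion time in seconds parsed from the CLI output,
    or None if the run failed or no timing line was found.
    """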
    output_path = os.path.join(OUTPUT_DIR, f"output_{backend}_bench.wav")
    cmd = [
        "conda", "run", "-n", "rvc", "python", "rvc_cli.py", "infer",
        "--backend", backend,
        "--input_path", INPUT_AUDIO,
        "--output_path", output_path,
        "--pth_path", MODEL_PATH,
        "--index_path", INDEX_PATH
    ]

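    # OMP_NUM_THREADS=1 limits OpenMP to a single thread in the child
    # process, keeping CPU-side threading consistent across both backends.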
    env = os.environ.copy()
    env["OMP_NUM_THREADS"] = "1"

    print(f"Running {backend} inference...")
    result = subprocess.run(cmd, capture_output=True, text=True, env=env)

    if result.returncode != 0:
        print(f"Error running {backend} inference: {result.stderr}")
        return None

    # Parse time from output: "Conversion completed at '...' in 2.83 seconds."
    match = re.search(r"Conversion completed at .* in ([\d\.]+) seconds", result.stdout)
    if match:
        return float(match.group(1))
    else:
        print(f"Could not find timing in output for {backend}")
        print(result.stdout)
        return None

def main():
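    """Benchmark the torch and mlx backends over NUM_RUNS runs each
    and print per-backend summary statistics."""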
    if not os.path.exists(INPUT_AUDIO):
        print(f"Input audio not found: {INPUT_AUDIO}")
        return

    results = {"torch": [], "mlx": []}

    for i in range(NUM_RUNS):
        print(f"\n--- Run {i+1}/{NUM_RUNS} ---")

        # Torch
        t_torch = run_inference("torch")
        if t_torch is not None:
            results["torch"].append(t_torch)
            print(f"Torch: {t_torch:.3f}s")

        # MLX
        t_mlx = run_inference("mlx")
        if t_mlx is not None:
            results["mlx"].append(t_mlx)
            print(f"MLX: {t_mlx:.3f}s")

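    # Summary table: median, mean, and standard deviation per backend.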
    print("\n" + "="*50)
    print(f"{'Backend':<10} | {'Median':<10} | {'Mean':<10} | {'Std Dev':<10}")
    print("-"*50)

    for backend in ["torch", "mlx"]:
        times = results[backend]
        if times:
            median = np.median(times)
            mean = np.mean(times)
            std = np.std(times)
            print(f"{backend:<10} | {median:<10.3f} | {mean:<10.3f} | {std:<10.3f}")
        else:
            print(f"{backend:<10} | {'ERROR':<10} | {'ERROR':<10} | {'ERROR':<10}")

    if results["torch"] and results["mlx"]:
        m_torch = np.median(results["torch"])
        m_mlx = np.median(results["mlx"])
        speedup = m_torch / m_mlx
        print("\n" + "="*50)
        if speedup >= 1:
            print(f"MLX is {speedup:.2f}x faster (median) than PyTorch")
        else:
            print(f"MLX is {1/speedup:.2f}x slower (median) than PyTorch")
        print("="*50)

if __name__ == "__main__":
    main()