|
| 1 | +package sk.ainet.bench |
| 2 | + |
| 3 | +import java.util.concurrent.TimeUnit |
| 4 | +import org.openjdk.jmh.annotations.Benchmark |
| 5 | +import org.openjdk.jmh.annotations.BenchmarkMode |
| 6 | +import org.openjdk.jmh.annotations.Level |
| 7 | +import org.openjdk.jmh.annotations.Mode |
| 8 | +import org.openjdk.jmh.annotations.OutputTimeUnit |
| 9 | +import org.openjdk.jmh.annotations.Param |
| 10 | +import org.openjdk.jmh.annotations.Scope |
| 11 | +import org.openjdk.jmh.annotations.Setup |
| 12 | +import org.openjdk.jmh.annotations.State |
| 13 | +import sk.ainet.backend.api.kernel.Fp32MatmulKernel |
| 14 | +import sk.ainet.exec.kernel.PanamaVectorMatmulKernel |
| 15 | +import sk.ainet.exec.kernel.ScalarMatmulKernel |
| 16 | + |
/**
 * Benchmarks `Fp32MatmulKernel.matmul` at the kernel boundary, keeping the
 * `TensorOps` wrapper, dispatch, and context allocation out of the timed
 * region. Exists to validate the M5 milestone target — Panama Vector kernel
 * >= 1.5x scalar — in isolation from the rest of the op pipeline.
 *
 * `MatmulBench` runs the same operation through `ctx.ops.matmul`
 * (production routing). Until `DefaultCpuOpsJvm.matmul` is wired through
 * `KernelRegistry`, this bench is the only pure kernel-vs-kernel number.
 */
@State(Scope.Benchmark)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
open class KernelMatmulBench {

    // Square matrix dimension (n x n operands).
    @Param("256", "512", "1024")
    var size: Int = 512

    // Which Fp32MatmulKernel implementation to time.
    @Param("scalar", "panama")
    var provider: String = "panama"

    private lateinit var kernel: Fp32MatmulKernel
    private lateinit var lhs: FloatArray
    private lateinit var rhs: FloatArray
    private lateinit var dst: FloatArray

    /** Resolves the kernel and seeds operands once per trial. */
    @Setup(Level.Trial)
    fun setup() {
        kernel = when (provider) {
            "scalar" -> ScalarMatmulKernel
            "panama" -> PanamaVectorMatmulKernel
            else -> error("unknown provider: $provider")
        }
        val dim = size
        // Inputs are seeded identically to MatmulBench so the two benches
        // produce directly comparable numbers.
        lhs = FloatArray(dim * dim) { i -> ((i % 251) - 125).toFloat() / 127f }
        rhs = FloatArray(dim * dim) { i -> ((i * 13 % 257) - 128).toFloat() / 127f }
        dst = FloatArray(dim * dim)
    }

    /** Times a single n x n x n matmul; returns the output to defeat DCE. */
    @Benchmark
    fun matmul_fp32_square(): FloatArray {
        val dim = size
        kernel.matmul(lhs, 0, dim, rhs, 0, dim, dst, 0, dim, dim, dim, dim)
        return dst
    }
}
0 commit comments