package sk.ainet.exec.kernel

import java.lang.foreign.Arena
import java.lang.foreign.FunctionDescriptor
import java.lang.foreign.Linker
import java.lang.foreign.MemorySegment
import java.lang.foreign.ValueLayout
import java.lang.invoke.MethodHandle
import sk.ainet.backend.api.kernel.Fp32MatmulKernel

/**
 * Native (FFM) implementation of [Fp32MatmulKernel].
 *
 * Wraps the bundled C symbol
 *
 *     void skainet_fp32_matmul(
 *         const float* a, int32_t a_offset, int32_t a_stride,
 *         const float* b, int32_t b_offset, int32_t b_stride,
 *         float* c, int32_t c_offset, int32_t c_stride,
 *         int32_t m, int32_t n, int32_t k);
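 *
 * Layout convention (as the reach arithmetic below assumes): element
 * (i, j) of a matrix lives at `x[x_offset + i * x_stride + j]`, i.e.
 * row-major with an arbitrary leading offset and a row stride at least
 * as large as the row length.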
 *
 * The C kernel is a tight i-p-j outer-product accumulator over rows
 * of C; the inner `c[j] += a * b[j]` loop streams two contiguous
 * arrays and auto-vectorizes into FMA under -O3 -ffast-math
 * (vfmadd231ps on x86_64, fmla on AArch64).
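 *
 * For reference, the kernel computes the same result as this scalar
 * Kotlin loop (a sketch of the semantics only, not the C source):
 *
 * ```
 * for (i in 0 until m) {
 *     val cRow = cOffset + i * cStride           // start of row i of C
 *     for (p in 0 until k) {
 *         val aip = a[aOffset + i * aStride + p] // broadcast A(i, p)
 *         val bRow = bOffset + p * bStride       // start of row p of B
 *         for (j in 0 until n) c[cRow + j] += aip * b[bRow + j]
 *     }
 * }
 * ```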
 *
 * Numerical parity vs [PanamaVectorMatmulKernel] is asserted by
 * [NativeFp32MatmulKernelParityTest] within FMA + reordered-reduction
 * tolerance (the same `1e-5 * k` bar Panama uses against the scalar
 * reference).
 *
 * PR 5 of the staged native-FFM rollout, per the `native-ffm-plan`
 * asciidoc. Single-threaded, no cache blocking; future work could add
 * parallelChunks-style row blocking and B-tile packing, but the scalar
 * C path already lands well within the SPI contract on host-arch CPUs.
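 *
 * Minimal caller sketch (illustrative wiring; assumes
 * [PanamaVectorMatmulKernel] is selectable the same way):
 *
 * ```
 * val kernel = if (NativeFp32MatmulKernel.isAvailable()) NativeFp32MatmulKernel
 *              else PanamaVectorMatmulKernel
 * // contiguous row-major: aStride = k, bStride = n, outStride = n
 * kernel.matmul(a, 0, k, b, 0, n, c, 0, n, m, n, k)
 * ```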
 */
internal object NativeFp32MatmulKernel : Fp32MatmulKernel {

    /** True when the bundled library loaded and `skainet_fp32_matmul` was bound; check before calling [matmul]. */
    fun isAvailable(): Boolean = handle != null

    override fun matmul(
        a: FloatArray, aOffset: Int, aStride: Int,
        b: FloatArray, bOffset: Int, bStride: Int,
        out: FloatArray, outOffset: Int, outStride: Int,
        m: Int, n: Int, k: Int,
    ) {
        require(m >= 0 && n >= 0 && k >= 0) {
            "NativeFp32MatmulKernel: m, n, k must be non-negative; got m=$m n=$n k=$k"
        }
        if (m == 0 || n == 0) return

        val mh = handle
            ?: error("NativeFp32MatmulKernel.matmul invoked while native library unavailable")

        // Sizes for the off-heap copies. Each of A, B, C uses the
        // bytes the kernel actually reaches — for non-contiguous
        // strides this can be larger than the matrix's element count
        // because the strides skip past unused floats. Allocating to
        // the full reach (offset + last-row reach) keeps the kernel
        // pointer arithmetic simple and matches Kotlin's bounds.
        // (m > 0 and n > 0 are guaranteed by the early return above,
        // so only k = 0 can collapse A's and B's reach to zero.)
        val aReachFloats = if (k == 0) 0 else aOffset + (m - 1) * aStride + k
        val bReachFloats = if (k == 0) 0 else bOffset + (k - 1) * bStride + n
        val cReachFloats = outOffset + (m - 1) * outStride + n
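        // Worked example (hypothetical numbers): m=2, k=3, aOffset=1,
        // aStride=4 gives aReachFloats = 1 + 1*4 + 3 = 8, two more than
        // A's 6 elements; the gap floats are copied but never read.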

        Arena.ofConfined().use { arena ->
            val aBytes = aReachFloats.toLong() * Float.SIZE_BYTES
            val bBytes = bReachFloats.toLong() * Float.SIZE_BYTES
            val cBytes = cReachFloats.toLong() * Float.SIZE_BYTES
            val align = ValueLayout.JAVA_FLOAT.byteAlignment()

            val aSeg: MemorySegment = if (aBytes > 0) arena.allocate(aBytes, align) else MemorySegment.NULL
            val bSeg: MemorySegment = if (bBytes > 0) arena.allocate(bBytes, align) else MemorySegment.NULL
            val cSeg: MemorySegment = arena.allocate(cBytes, align)
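            // Arena.allocate zero-fills the segment, which the
            // accumulating (`+=`) kernel relies on: starting from a
            // zeroed C, the outer-product loop yields plain C = A * B.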

            if (aReachFloats > 0) {
                MemorySegment.copy(a, 0, aSeg, ValueLayout.JAVA_FLOAT, 0L, aReachFloats)
            }
            if (bReachFloats > 0) {
                MemorySegment.copy(b, 0, bSeg, ValueLayout.JAVA_FLOAT, 0L, bReachFloats)
            }

            mh.invoke(
                aSeg, aOffset, aStride,
                bSeg, bOffset, bStride,
                cSeg, outOffset, outStride,
                m, n, k,
            )

            // Copy back only the n floats the kernel wrote in each row;
            // copying the full reach would smear cSeg's zero-filled
            // offset prefix and stride gaps over caller data in `out`.
            for (i in 0 until m) {
                val rowStart = outOffset + i * outStride
                MemorySegment.copy(
                    cSeg, ValueLayout.JAVA_FLOAT, rowStart.toLong() * Float.SIZE_BYTES,
                    out, rowStart, n,
                )
            }
        }
    }
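
    /**
     * Lazily resolved downcall handle for `skainet_fp32_matmul`: null when
     * the bundled library fails to load, the symbol is missing, or linking
     * fails. [isAvailable] surfaces that state to callers.
     */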
    private val handle: MethodHandle? by lazy {
        val lookup = NativeLibraryLoader.lookup() ?: return@lazy null
        val symbol = lookup.find("skainet_fp32_matmul").orElse(null) ?: return@lazy null
        val descriptor = FunctionDescriptor.ofVoid(
            ValueLayout.ADDRESS,  // a
            ValueLayout.JAVA_INT, // a_offset
            ValueLayout.JAVA_INT, // a_stride
            ValueLayout.ADDRESS,  // b
            ValueLayout.JAVA_INT, // b_offset
            ValueLayout.JAVA_INT, // b_stride
            ValueLayout.ADDRESS,  // c
            ValueLayout.JAVA_INT, // c_offset
            ValueLayout.JAVA_INT, // c_stride
            ValueLayout.JAVA_INT, // m
            ValueLayout.JAVA_INT, // n
            ValueLayout.JAVA_INT, // k
        )
        runCatching { Linker.nativeLinker().downcallHandle(symbol, descriptor) }.getOrNull()
    }
}