Commit cea964a

Add initial naive implementation of tensors backend for CPU with Float
1 parent 91df5d5 commit cea964a

13 files changed

Lines changed: 1912 additions & 2 deletions


settings.gradle.kts

Lines changed: 1 addition & 0 deletions
@@ -16,3 +16,4 @@ dependencyResolutionManagement {
 rootProject.name = "skainet"
 
 include("skainet-core:skainet-tensors-api")
+include("skainet-core:skainet-tensors")
skainet-core/skainet-tensors-api/gradle.properties

Lines changed: 1 addition & 1 deletion

@@ -1,2 +1,2 @@
-POM_ARTIFACT_ID=core-api
+POM_ARTIFACT_ID=tensors-api
 POM_NAME=skainet core API
skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ai/net/core/tensor/backend/ComputeBackend.kt

Lines changed: 21 additions & 0 deletions

@@ -0,0 +1,21 @@
package sk.ai.net.core.tensor.backend

import sk.ai.net.core.tensor.DType
import sk.ai.net.core.tensor.Tensor
import sk.ai.net.core.tensor.TensorOps

/**
 * Interface representing a computation backend for tensor operations.
 *
 * A computation backend is responsible for executing tensor operations on a specific
 * hardware platform (CPU, GPU, etc.). Different backends can provide different
 * implementations of the same operations, optimized for their target platform.
 */
public interface ComputeBackend<D : DType, V> : TensorOps<Tensor<D, V>> {
    /**
     * The name of the backend.
     */
    public val name: String
}
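
Since the interface adds only `name` on top of the operations inherited from `TensorOps`, the name is chiefly useful for picking among several registered backends. A minimal sketch of that pattern, assuming only the types introduced in this commit (the `selectBackend` helper and its parameters are illustrative, not part of the API):

import sk.ai.net.core.tensor.DType
import sk.ai.net.core.tensor.backend.ComputeBackend

// Hypothetical helper, not part of this commit: prefer a backend by name,
// falling back to the first candidate when no name matches.
public fun <D : DType, V> selectBackend(
    candidates: List<ComputeBackend<D, V>>,
    preferred: String
): ComputeBackend<D, V> =
    candidates.firstOrNull { it.name == preferred } ?: candidates.first()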

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ai/net/core/tensor/Shape.kt

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ public data class Shape(val dimensions: IntArray) {
     val rank: Int
         get() = dimensions.size
 
-    internal fun index(indices: IntArray): Int {
+    public fun index(indices: IntArray): Int {
         assert(
             { indices.size == dimensions.size },
             { "`indices.size` must be ${dimensions.size}: ${indices.size}" })
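
Widening `index` from internal to public is what lets `CpuTensorFP32.get` in the new module below compute flat offsets via `shape.index(indices)`. For reference, the row-major arithmetic this implies is sketched here; it is an assumed equivalent based on the row-major layout described in the new tensor class, not the actual `Shape.index` body, which this diff does not show:

// Row-major flattening: for dimensions (d0, d1, ..., dn) and indices (i0, i1, ..., in),
// offset = ((i0 * d1 + i1) * d2 + i2) * d3 + ... + in
fun rowMajorIndex(dimensions: IntArray, indices: IntArray): Int {
    var offset = 0
    for (axis in dimensions.indices) {
        offset = offset * dimensions[axis] + indices[axis]
    }
    return offset
}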
skainet-core/skainet-tensors/build.gradle.kts

Lines changed: 59 additions & 0 deletions

@@ -0,0 +1,59 @@
import org.jetbrains.kotlin.gradle.ExperimentalKotlinGradlePluginApi
import org.jetbrains.kotlin.gradle.ExperimentalWasmDsl
import org.jetbrains.kotlin.gradle.dsl.JvmTarget

plugins {
    alias(libs.plugins.kotlinMultiplatform)
    alias(libs.plugins.androidLibrary)
    alias(libs.plugins.vanniktech.mavenPublish)
}

kotlin {
    explicitApi()

    androidTarget {
        @OptIn(ExperimentalKotlinGradlePluginApi::class)
        compilerOptions {
            jvmTarget.set(JvmTarget.JVM_11)
        }
    }

    iosArm64()
    iosSimulatorArm64()
    macosArm64()
    linuxX64()
    linuxArm64()

    jvm()

    @OptIn(ExperimentalWasmDsl::class)
    wasmJs {
        browser()
        binaries.executable()
    }

    sourceSets {
        val commonMain by getting {
            dependencies {
                implementation(project(":skainet-core:skainet-tensors-api"))
            }
        }

        commonTest.dependencies {
            implementation(libs.kotlin.test)
        }
    }
}

android {
    namespace = "sk.ai.net.core.api"
    compileSdk = libs.versions.android.compileSdk.get().toInt()

    defaultConfig {
        minSdk = libs.versions.android.minSdk.get().toInt()
    }
    compileOptions {
        sourceCompatibility = JavaVersion.VERSION_11
        targetCompatibility = JavaVersion.VERSION_11
    }
}
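
Any other module in this build can consume the implementation the same way this file consumes the API module. A hypothetical consumer's build.gradle.kts (not part of this commit) might declare:

// Hypothetical consumer module, not part of this commit.
kotlin {
    sourceSets {
        val commonMain by getting {
            dependencies {
                implementation(project(":skainet-core:skainet-tensors"))
            }
        }
    }
}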
skainet-core/skainet-tensors/gradle.properties

Lines changed: 2 additions & 0 deletions

@@ -0,0 +1,2 @@
POM_ARTIFACT_ID=tensors
POM_NAME=skainet core API
Lines changed: 285 additions & 0 deletions
@@ -0,0 +1,285 @@
1+
package sk.ai.net.core.tensor.backend
2+
3+
import sk.ai.net.core.tensor.*
4+
5+
/**
6+
* A CPU-based tensor for FP32/Float values.
7+
*
8+
* This tensor stores data on the CPU using simple FloatArray with NCHW row-major layout.
9+
* It supports 1-4 dimensional tensors and delegates all operations to CpuBackend.
10+
*/
11+
public class CpuTensorFP32(
    override val shape: Shape,
    internal val data: FloatArray
) : Tensor<FP32, Float> {

    init {
        require(data.size == shape.volume) {
            "Data size ${data.size} doesn't match shape volume ${shape.volume}"
        }
        require(shape.rank in 1..4) {
            "Only 1-4 dimensional tensors are supported, got ${shape.rank}"
        }
    }

    override fun get(vararg indices: Int): Float {
        val index = shape.index(indices)
        return data[index]
    }

    // Delegate all operations to the CpuBackend
    private val backend = CpuBackend()

    override fun matmul(a: Tensor<FP32, Float>, b: Tensor<FP32, Float>): Tensor<FP32, Float> = backend.matmul(a, b)
    override fun scale(a: Tensor<FP32, Float>, scalar: Double): Tensor<FP32, Float> = backend.scale(a, scalar)
    override fun dot(a: Tensor<FP32, Float>, b: Tensor<FP32, Float>): Double = backend.dot(a, b)

    // Tensor-Tensor operations
    override fun Tensor<FP32, Float>.plus(other: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@plus.plus(other) }
    override fun Tensor<FP32, Float>.minus(other: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@minus.minus(other) }
    override fun Tensor<FP32, Float>.times(other: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@times.times(other) }
    override fun Tensor<FP32, Float>.div(other: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@div.div(other) }

    // Tensor-Scalar operations - delegate to backend
    override fun Tensor<FP32, Float>.plus(scalar: Int): Tensor<FP32, Float> = with(backend) { this@plus.plus(scalar) }
    override fun Tensor<FP32, Float>.minus(scalar: Int): Tensor<FP32, Float> = with(backend) { this@minus.minus(scalar) }
    override fun Tensor<FP32, Float>.times(scalar: Int): Tensor<FP32, Float> = with(backend) { this@times.times(scalar) }
    override fun Tensor<FP32, Float>.div(scalar: Int): Tensor<FP32, Float> = with(backend) { this@div.div(scalar) }

    override fun Tensor<FP32, Float>.plus(scalar: Float): Tensor<FP32, Float> = with(backend) { this@plus.plus(scalar) }
    override fun Tensor<FP32, Float>.minus(scalar: Float): Tensor<FP32, Float> = with(backend) { this@minus.minus(scalar) }
    override fun Tensor<FP32, Float>.times(scalar: Float): Tensor<FP32, Float> = with(backend) { this@times.times(scalar) }
    override fun Tensor<FP32, Float>.div(scalar: Float): Tensor<FP32, Float> = with(backend) { this@div.div(scalar) }

    override fun Tensor<FP32, Float>.plus(scalar: Double): Tensor<FP32, Float> = with(backend) { this@plus.plus(scalar) }
    override fun Tensor<FP32, Float>.minus(scalar: Double): Tensor<FP32, Float> = with(backend) { this@minus.minus(scalar) }
    override fun Tensor<FP32, Float>.times(scalar: Double): Tensor<FP32, Float> = with(backend) { this@times.times(scalar) }
    override fun Tensor<FP32, Float>.div(scalar: Double): Tensor<FP32, Float> = with(backend) { this@div.div(scalar) }

    // Scalar-Tensor operations - delegate to backend
    override fun Double.plus(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@plus.plus(t) }
    override fun Double.minus(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@minus.minus(t) }
    override fun Double.times(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@times.times(t) }
    override fun Double.div(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@div.div(t) }

    public companion object {
        /**
         * Creates a tensor from an array with the given shape.
         */
        public fun fromArray(shape: Shape, data: FloatArray): CpuTensorFP32 {
            return CpuTensorFP32(shape, data)
        }

        /**
         * Creates a tensor filled with zeros.
         */
        public fun zeros(shape: Shape): CpuTensorFP32 {
            return CpuTensorFP32(shape, FloatArray(shape.volume))
        }

        /**
         * Creates a tensor filled with ones.
         */
        public fun ones(shape: Shape): CpuTensorFP32 {
            return CpuTensorFP32(shape, FloatArray(shape.volume) { 1.0f })
        }

        /**
         * Creates a tensor filled with a specific value.
         */
        public fun full(shape: Shape, value: Float): CpuTensorFP32 {
            return CpuTensorFP32(shape, FloatArray(shape.volume) { value })
        }
    }
}

/**
 * A CPU-based implementation of the ComputeBackend interface for FP32/Float tensors.
 */
public class CpuBackend : ComputeBackend<FP32, Float> {
    override val name: String = "CPU"

    // Basic operations - implement the actual computation logic
    override fun matmul(a: Tensor<FP32, Float>, b: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(a is CpuTensorFP32 && b is CpuTensorFP32) { "Both tensors must be CpuTensorFP32" }
        require(a.shape.rank == 2 && b.shape.rank == 2) { "Matrix multiplication requires 2D tensors" }
        require(a.shape[1] == b.shape[0]) { "Matrix dimensions don't match for multiplication" }

        val rows = a.shape[0]
        val cols = b.shape[1]
        val inner = a.shape[1]
        val result = FloatArray(rows * cols)

        for (i in 0 until rows) {
            for (j in 0 until cols) {
                var sum = 0f
                for (k in 0 until inner) {
                    sum += a.data[i * inner + k] * b.data[k * cols + j]
                }
                result[i * cols + j] = sum
            }
        }

        return CpuTensorFP32(Shape(rows, cols), result)
    }

    override fun scale(a: Tensor<FP32, Float>, scalar: Double): Tensor<FP32, Float> {
        require(a is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = a.data.map { it * scalar.toFloat() }.toFloatArray()
        return CpuTensorFP32(a.shape, result)
    }

    override fun dot(a: Tensor<FP32, Float>, b: Tensor<FP32, Float>): Double {
        require(a is CpuTensorFP32 && b is CpuTensorFP32) { "Both tensors must be CpuTensorFP32" }
        require(a.shape == b.shape) { "Tensors must have same shape for dot product" }

        var sum = 0.0
        for (i in a.data.indices) {
            sum += a.data[i] * b.data[i]
        }
        return sum
    }

    // Tensor-Tensor operations - implement actual computation logic
    override fun Tensor<FP32, Float>.plus(other: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(this is CpuTensorFP32 && other is CpuTensorFP32) { "Both tensors must be CpuTensorFP32" }
        require(this.shape == other.shape) { "Tensors must have same shape for addition" }

        val result = FloatArray(this.data.size)
        for (i in this.data.indices) {
            result[i] = this.data[i] + other.data[i]
        }
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.minus(other: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(this is CpuTensorFP32 && other is CpuTensorFP32) { "Both tensors must be CpuTensorFP32" }
        require(this.shape == other.shape) { "Tensors must have same shape for subtraction" }

        val result = FloatArray(this.data.size)
        for (i in this.data.indices) {
            result[i] = this.data[i] - other.data[i]
        }
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.times(other: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(this is CpuTensorFP32 && other is CpuTensorFP32) { "Both tensors must be CpuTensorFP32" }
        require(this.shape == other.shape) { "Tensors must have same shape for element-wise multiplication" }

        val result = FloatArray(this.data.size)
        for (i in this.data.indices) {
            result[i] = this.data[i] * other.data[i]
        }
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.div(other: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(this is CpuTensorFP32 && other is CpuTensorFP32) { "Both tensors must be CpuTensorFP32" }
        require(this.shape == other.shape) { "Tensors must have same shape for element-wise division" }

        val result = FloatArray(this.data.size)
        for (i in this.data.indices) {
            result[i] = this.data[i] / other.data[i]
        }
        return CpuTensorFP32(this.shape, result)
    }

    // Tensor-Scalar operations
    override fun Tensor<FP32, Float>.plus(scalar: Int): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it + scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.minus(scalar: Int): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it - scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.times(scalar: Int): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it * scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.div(scalar: Int): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it / scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.plus(scalar: Float): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it + scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.minus(scalar: Float): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it - scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.times(scalar: Float): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it * scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.div(scalar: Float): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it / scalar }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.plus(scalar: Double): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it + scalar.toFloat() }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.minus(scalar: Double): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it - scalar.toFloat() }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.times(scalar: Double): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it * scalar.toFloat() }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    override fun Tensor<FP32, Float>.div(scalar: Double): Tensor<FP32, Float> {
        require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = this.data.map { it / scalar.toFloat() }.toFloatArray()
        return CpuTensorFP32(this.shape, result)
    }

    // Scalar-Tensor operations
    override fun Double.plus(t: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(t is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = t.data.map { this.toFloat() + it }.toFloatArray()
        return CpuTensorFP32(t.shape, result)
    }

    override fun Double.minus(t: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(t is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = t.data.map { this.toFloat() - it }.toFloatArray()
        return CpuTensorFP32(t.shape, result)
    }

    override fun Double.times(t: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(t is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = t.data.map { this.toFloat() * it }.toFloatArray()
        return CpuTensorFP32(t.shape, result)
    }

    override fun Double.div(t: Tensor<FP32, Float>): Tensor<FP32, Float> {
        require(t is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
        val result = t.data.map { this.toFloat() / it }.toFloatArray()
        return CpuTensorFP32(t.shape, result)
    }
}
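
Tying the pieces together, a minimal usage sketch of the new backend and tensor. The values and variable names are illustrative; `Shape(2, 3)` and `get` follow the constructor and accessor used elsewhere in this diff, and explicit `plus`/`times` calls are used since the diff does not show whether `TensorOps` declares them as operators:

import sk.ai.net.core.tensor.Shape
import sk.ai.net.core.tensor.backend.CpuBackend
import sk.ai.net.core.tensor.backend.CpuTensorFP32

fun main() {
    val backend = CpuBackend()

    // 2x3 times 3x2 -> 2x2, exercising the naive triple-loop matmul above.
    val a = CpuTensorFP32.fromArray(Shape(2, 3), floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f))
    val b = CpuTensorFP32.ones(Shape(3, 2))
    val c = backend.matmul(a, b)
    println(c.get(0, 0)) // 1 + 2 + 3 = 6.0

    // The element-wise operations are member extensions of the backend,
    // so bring them into scope with `with`.
    with(backend) {
        val doubled = a.plus(a)         // element-wise addition
        val halved = doubled.times(0.5) // scalar multiply (Double overload)
        println(halved.get(1, 2))       // (6 + 6) * 0.5 = 6.0
    }
}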
