Skip to content

Commit 565c2c2

Browse files
committed
Fix package names and add nn package with basic layer and functions
1 parent 1519949 commit 565c2c2

32 files changed

Lines changed: 873 additions & 44 deletions

File tree

settings.gradle.kts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,3 +18,5 @@ rootProject.name = "skainet"
1818
include("skainet-core:skainet-tensors-api")
1919
include("skainet-core:skainet-tensors")
2020
include("skainet-core:skainet-performance")
21+
include("skainet-nn:skainet-nn-api")
22+
include("skainet-nn:skainet-nn-relection")

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/DType.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package sk.ai.net.core.tensor
1+
package sk.ainet.core.tensor
22

33
// Base marker interface for all dtypes
44
public sealed interface DType {

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/Shape.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package sk.ai.net.core.tensor
1+
package sk.ainet.core.tensor
22

33
/**
44
* Data class representing the shape of a multi-dimensional array (tensor).

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/Tensor.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package sk.ai.net.core.tensor
1+
package sk.ainet.core.tensor
22

33
/**
44
* Interface representing a multi-dimensional array of numeric values.

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/TensorData.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package sk.ai.net.core.tensor
1+
package sk.ainet.core.tensor
22

33
public interface TensorData<T:DType, V> {
44
/**

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/TensorOps.kt

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
1-
package sk.ai.net.core.tensor
1+
package sk.ainet.core.tensor
22

33
/**
44
* Interface representing mathematical operations on tensors.
55
*
66
* Keeps math separate from storage, so the same math API can work
77
* with multiple backends (dense, sparse, GPU, etc.).
88
*/
9-
public interface TensorOps<T: Tensor<*, *>> {
9+
public interface TensorOps<T : Tensor<*, *>> {
1010
/**
1111
* Performs matrix multiplication of two tensors.
1212
*
@@ -100,4 +100,40 @@ public interface TensorOps<T: Tensor<*, *>> {
100100
public operator fun Float.minus(t: T): T = this.toDouble() - t
101101
public operator fun Float.times(t: T): T = this.toDouble() * t
102102
public operator fun Float.div(t: T): T = this.toDouble() / t
103-
}
103+
104+
public fun T.t(): T // transpose
105+
106+
public fun T.relu(): T
107+
108+
/**
109+
* Applies the softmax function along the specified dimension of the tensor.
110+
*/
111+
public fun T.softmax(dimension: Int): T
112+
113+
/**
114+
* Applies the sigmoid function element-wise to the tensor.
115+
*
116+
* @return A new tensor with the sigmoid function applied to each element.
117+
*/
118+
public fun T.sigmoid(): T
119+
120+
121+
/**
122+
* Applies the hyperbolic tangent (tanh) function element-wise to the tensor.
123+
*
124+
* @return A new tensor with the tanh function applied to each element.
125+
*/
126+
public fun T.tanh(): T
127+
128+
129+
/**
130+
* Flattens the tensor into a 1D tensor.
131+
*
132+
* @param startDim The first dimension to flatten (inclusive).
133+
* @param endDim The last dimension to flatten (inclusive).
134+
* @return A new flattened tensor.
135+
*/
136+
public fun T.flatten(startDim: Int = 1, endDim: Int = -1): T
137+
}
138+
139+

skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/ComputeBackend.kt renamed to skainet-core/skainet-tensors-api/src/commonMain/kotlin/sk/ainet/core/tensor/backend/ComputeBackend.kt

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
1-
package sk.ai.net.core.tensor.backend
1+
package sk.ainet.core.tensor.backend
2+
3+
import sk.ainet.core.tensor.DType
4+
import sk.ainet.core.tensor.Tensor
5+
import sk.ainet.core.tensor.TensorOps
26

3-
import sk.ai.net.core.tensor.DType
4-
import sk.ai.net.core.tensor.Tensor
5-
import sk.ai.net.core.tensor.TensorOps
67

78
/**
89
* Interface representing a computation backend for tensor operations.

skainet-core/skainet-tensors/src/commonMain/kotlin/sk/ainet/core/tensor/backend/CpuBackend.kt

Lines changed: 134 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
package sk.ainet.core.tensor.backend
22

3-
import sk.ai.net.core.tensor.*
4-
import sk.ai.net.core.tensor.backend.ComputeBackend
3+
import sk.ainet.core.tensor.*
4+
import sk.ainet.core.tensor.backend.ComputeBackend
5+
import kotlin.math.*
56

67
/**
78
* Convenient type alias for FP32 tensors with Float values.
@@ -149,6 +150,14 @@ public class CpuTensorFP32(
149150
override fun Double.times(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@times.times(t) }
150151
override fun Double.div(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@div.div(t) }
151152

153+
// Advanced tensor operations - delegate to backend
154+
override fun Tensor<FP32, Float>.t(): Tensor<FP32, Float> = with(backend) { this@t.t() }
155+
override fun Tensor<FP32, Float>.relu(): Tensor<FP32, Float> = with(backend) { this@relu.relu() }
156+
override fun Tensor<FP32, Float>.sigmoid(): Tensor<FP32, Float> = with(backend) { this@sigmoid.sigmoid() }
157+
override fun Tensor<FP32, Float>.tanh(): Tensor<FP32, Float> = with(backend) { this@tanh.tanh() }
158+
override fun Tensor<FP32, Float>.softmax(dimension: Int): Tensor<FP32, Float> = with(backend) { this@softmax.softmax(dimension) }
159+
override fun Tensor<FP32, Float>.flatten(startDim: Int, endDim: Int): Tensor<FP32, Float> = with(backend) { this@flatten.flatten(startDim, endDim) }
160+
152161
public companion object {
153162
/**
154163
* Creates a tensor from an array with the given shape.
@@ -369,4 +378,127 @@ public class CpuBackend : ComputeBackend<FP32, Float> {
369378
val result = t.data.map { this.toFloat() / it }.toFloatArray()
370379
return CpuTensorFP32(t.shape, result)
371380
}
381+
382+
// Advanced tensor operations
383+
override fun Tensor<FP32, Float>.t(): Tensor<FP32, Float> {
384+
require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
385+
require(this.shape.rank == 2) { "Transpose only supported for 2D tensors (matrices)" }
386+
387+
val rows = this.shape[0]
388+
val cols = this.shape[1]
389+
val result = FloatArray(rows * cols)
390+
391+
for (i in 0 until rows) {
392+
for (j in 0 until cols) {
393+
result[j * rows + i] = this.data[i * cols + j]
394+
}
395+
}
396+
397+
return CpuTensorFP32(Shape(cols, rows), result)
398+
}
399+
400+
override fun Tensor<FP32, Float>.relu(): Tensor<FP32, Float> {
401+
require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
402+
val result = this.data.map { maxOf(0f, it) }.toFloatArray()
403+
return CpuTensorFP32(this.shape, result)
404+
}
405+
406+
override fun Tensor<FP32, Float>.sigmoid(): Tensor<FP32, Float> {
407+
require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
408+
val result = this.data.map { 1f / (1f + exp(-it)) }.toFloatArray()
409+
return CpuTensorFP32(this.shape, result)
410+
}
411+
412+
override fun Tensor<FP32, Float>.tanh(): Tensor<FP32, Float> {
413+
require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
414+
val result = this.data.map { tanh(it) }.toFloatArray()
415+
return CpuTensorFP32(this.shape, result)
416+
}
417+
418+
override fun Tensor<FP32, Float>.softmax(dimension: Int): Tensor<FP32, Float> {
419+
require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
420+
require(dimension in 0 until this.shape.rank) { "Dimension $dimension is out of bounds for tensor with rank ${this.shape.rank}" }
421+
422+
when (this.shape.rank) {
423+
1 -> {
424+
// For 1D tensor, apply softmax across the single dimension
425+
val maxVal = this.data.maxOrNull() ?: 0f
426+
val expValues = this.data.map { exp(it - maxVal) }
427+
val sum = expValues.sum()
428+
val result = expValues.map { it / sum }.toFloatArray()
429+
return CpuTensorFP32(this.shape, result)
430+
}
431+
2 -> {
432+
// For 2D tensor (matrix), apply softmax along specified dimension
433+
val rows = this.shape[0]
434+
val cols = this.shape[1]
435+
val result = FloatArray(this.data.size)
436+
437+
if (dimension == 0) {
438+
// Apply softmax along rows (for each column)
439+
for (j in 0 until cols) {
440+
val columnValues = FloatArray(rows) { i -> this.data[i * cols + j] }
441+
val maxVal = columnValues.maxOrNull() ?: 0f
442+
val expValues = columnValues.map { exp(it - maxVal) }
443+
val sum = expValues.sum()
444+
for (i in 0 until rows) {
445+
result[i * cols + j] = expValues[i] / sum
446+
}
447+
}
448+
} else {
449+
// Apply softmax along columns (for each row)
450+
for (i in 0 until rows) {
451+
val rowStart = i * cols
452+
val rowValues = this.data.sliceArray(rowStart until rowStart + cols)
453+
val maxVal = rowValues.maxOrNull() ?: 0f
454+
val expValues = rowValues.map { exp(it - maxVal) }
455+
val sum = expValues.sum()
456+
for (j in 0 until cols) {
457+
result[rowStart + j] = expValues[j] / sum
458+
}
459+
}
460+
}
461+
return CpuTensorFP32(this.shape, result)
462+
}
463+
else -> {
464+
throw UnsupportedOperationException("Softmax not implemented for tensors with rank > 2")
465+
}
466+
}
467+
}
468+
469+
override fun Tensor<FP32, Float>.flatten(startDim: Int, endDim: Int): Tensor<FP32, Float> {
470+
require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
471+
472+
val actualEndDim = if (endDim == -1) this.shape.rank - 1 else endDim
473+
require(startDim >= 0 && startDim < this.shape.rank) { "startDim $startDim is out of bounds" }
474+
require(actualEndDim >= startDim && actualEndDim < this.shape.rank) { "endDim $actualEndDim is out of bounds or less than startDim" }
475+
476+
if (startDim == actualEndDim) {
477+
// No flattening needed
478+
return CpuTensorFP32(this.shape, this.data.copyOf())
479+
}
480+
481+
// Calculate new shape
482+
val newDimensions = mutableListOf<Int>()
483+
484+
// Add dimensions before startDim
485+
for (i in 0 until startDim) {
486+
newDimensions.add(this.shape[i])
487+
}
488+
489+
// Calculate flattened dimension size
490+
var flattenedSize = 1
491+
for (i in startDim..actualEndDim) {
492+
flattenedSize *= this.shape[i]
493+
}
494+
newDimensions.add(flattenedSize)
495+
496+
// Add dimensions after endDim
497+
for (i in (actualEndDim + 1) until this.shape.rank) {
498+
newDimensions.add(this.shape[i])
499+
}
500+
501+
val newShape = Shape(*newDimensions.toIntArray())
502+
return CpuTensorFP32(newShape, this.data.copyOf())
503+
}
372504
}

0 commit comments

Comments (0)