11package sk.ainet.core.tensor.backend
22
3- import sk.ai.net.core.tensor.*
4- import sk.ai.net.core.tensor.backend.ComputeBackend
3+ import sk.ainet.core.tensor.*
4+ import sk.ainet.core.tensor.backend.ComputeBackend
5+ import kotlin.math.*
56
/**
 * Convenient type alias for FP32 tensors with Float values.
@@ -149,6 +150,14 @@ public class CpuTensorFP32(
// Scalar-on-the-left arithmetic: forward to the backend's Double receivers.
override fun Double.times(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@times.times(t) }
override fun Double.div(t: Tensor<FP32, Float>): Tensor<FP32, Float> = with(backend) { this@div.div(t) }
151152
// Advanced tensor operations — each override simply forwards to the backing ComputeBackend.
override fun Tensor<FP32, Float>.t(): Tensor<FP32, Float> = with(backend) { this@t.t() }
override fun Tensor<FP32, Float>.relu(): Tensor<FP32, Float> = with(backend) { this@relu.relu() }
override fun Tensor<FP32, Float>.sigmoid(): Tensor<FP32, Float> = with(backend) { this@sigmoid.sigmoid() }
override fun Tensor<FP32, Float>.tanh(): Tensor<FP32, Float> = with(backend) { this@tanh.tanh() }
override fun Tensor<FP32, Float>.softmax(dimension: Int): Tensor<FP32, Float> = with(backend) { this@softmax.softmax(dimension) }
override fun Tensor<FP32, Float>.flatten(startDim: Int, endDim: Int): Tensor<FP32, Float> = with(backend) { this@flatten.flatten(startDim, endDim) }

152161 public companion object {
153162 /* *
154163 * Creates a tensor from an array with the given shape.
@@ -369,4 +378,127 @@ public class CpuBackend : ComputeBackend<FP32, Float> {
369378 val result = t.data.map { this .toFloat() / it }.toFloatArray()
370379 return CpuTensorFP32 (t.shape, result)
371380 }
381+
382+ // Advanced tensor operations
/**
 * Matrix transpose: result[j][i] = this[i][j].
 *
 * Only rank-2 tensors are supported; a new tensor with swapped shape
 * (cols, rows) is returned and the receiver is left untouched.
 */
override fun Tensor<FP32, Float>.t(): Tensor<FP32, Float> {
    require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
    require(this.shape.rank == 2) { "Transpose only supported for 2D tensors (matrices)" }

    val rows = this.shape[0]
    val cols = this.shape[1]
    val transposed = FloatArray(rows * cols)

    // Single linear pass over the source; derive (row, col) from the flat index
    // and scatter into the transposed position.
    for (idx in 0 until rows * cols) {
        val r = idx / cols
        val c = idx % cols
        transposed[c * rows + r] = this.data[idx]
    }

    return CpuTensorFP32(Shape(cols, rows), transposed)
}
399+
/**
 * Element-wise ReLU: each output element is max(0, x).
 *
 * Returns a new tensor with the same shape; the receiver is not modified.
 */
override fun Tensor<FP32, Float>.relu(): Tensor<FP32, Float> {
    require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
    // FloatArray(n) { ... } avoids boxing every element through List<Float>
    // as data.map { }.toFloatArray() would.
    val result = FloatArray(this.data.size) { i -> maxOf(0f, this.data[i]) }
    return CpuTensorFP32(this.shape, result)
}
405+
/**
 * Element-wise logistic sigmoid: 1 / (1 + e^(-x)).
 *
 * Returns a new tensor with the same shape; the receiver is not modified.
 */
override fun Tensor<FP32, Float>.sigmoid(): Tensor<FP32, Float> {
    require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
    // FloatArray(n) { ... } avoids boxing every element through List<Float>
    // as data.map { }.toFloatArray() would.
    val result = FloatArray(this.data.size) { i -> 1f / (1f + exp(-this.data[i])) }
    return CpuTensorFP32(this.shape, result)
}
411+
/**
 * Element-wise hyperbolic tangent.
 *
 * Returns a new tensor with the same shape; the receiver is not modified.
 */
override fun Tensor<FP32, Float>.tanh(): Tensor<FP32, Float> {
    require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
    // FloatArray(n) { ... } avoids boxing every element through List<Float>
    // as data.map { }.toFloatArray() would.
    val result = FloatArray(this.data.size) { i -> tanh(this.data[i]) }
    return CpuTensorFP32(this.shape, result)
}
417+
/**
 * Softmax along [dimension], with the max-subtraction trick for numerical
 * stability (exp(x - max) instead of exp(x)).
 *
 * Generalization: [dimension] may now be negative, counting from the last
 * dimension (e.g. -1 selects the final dimension), matching the convention
 * used by common tensor frameworks. Non-negative values behave exactly as
 * before, so existing callers are unaffected.
 *
 * Supported ranks: 1 (softmax over all elements) and 2 (per-column for
 * dim 0, per-row for dim 1). Higher ranks throw [UnsupportedOperationException].
 */
override fun Tensor<FP32, Float>.softmax(dimension: Int): Tensor<FP32, Float> {
    require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }
    val rank = this.shape.rank
    // Normalize a negative index before bounds-checking it.
    val dim = if (dimension < 0) dimension + rank else dimension
    require(dim in 0 until rank) { "Dimension $dimension is out of bounds for tensor with rank $rank" }

    when (rank) {
        1 -> {
            // 1D: one softmax over every element.
            val maxVal = this.data.maxOrNull() ?: 0f
            val exps = FloatArray(this.data.size) { i -> exp(this.data[i] - maxVal) }
            val sum = exps.sum()
            for (i in exps.indices) exps[i] /= sum
            return CpuTensorFP32(this.shape, exps)
        }
        2 -> {
            val rows = this.shape[0]
            val cols = this.shape[1]
            val result = FloatArray(this.data.size)

            if (dim == 0) {
                // Softmax down each column independently.
                for (j in 0 until cols) {
                    val column = FloatArray(rows) { i -> this.data[i * cols + j] }
                    val maxVal = column.maxOrNull() ?: 0f
                    var sum = 0f
                    for (i in 0 until rows) {
                        column[i] = exp(column[i] - maxVal)
                        sum += column[i]
                    }
                    for (i in 0 until rows) result[i * cols + j] = column[i] / sum
                }
            } else {
                // Softmax across each row independently.
                for (i in 0 until rows) {
                    val rowStart = i * cols
                    val row = this.data.copyOfRange(rowStart, rowStart + cols)
                    val maxVal = row.maxOrNull() ?: 0f
                    var sum = 0f
                    for (j in 0 until cols) {
                        row[j] = exp(row[j] - maxVal)
                        sum += row[j]
                    }
                    for (j in 0 until cols) result[rowStart + j] = row[j] / sum
                }
            }
            return CpuTensorFP32(this.shape, result)
        }
        else -> {
            throw UnsupportedOperationException("Softmax not implemented for tensors with rank > 2")
        }
    }
}
468+
/**
 * Flattens dimensions [startDim] through [endDim] (inclusive) into a single
 * dimension. Element order is unchanged — only the shape metadata is rewritten
 * over a copy of the data.
 *
 * Generalization: both indices may now be negative, counting from the last
 * dimension (so endDim = -1 still means "through the final dimension", and
 * e.g. startDim = -2 means the second-to-last). The previous behavior for
 * non-negative indices and endDim == -1 is unchanged.
 */
override fun Tensor<FP32, Float>.flatten(startDim: Int, endDim: Int): Tensor<FP32, Float> {
    require(this is CpuTensorFP32) { "Tensor must be CpuTensorFP32" }

    val rank = this.shape.rank
    // Normalize negative indices before validating them.
    val start = if (startDim < 0) startDim + rank else startDim
    val end = if (endDim < 0) endDim + rank else endDim
    require(start in 0 until rank) { "startDim $startDim is out of bounds" }
    require(end in start until rank) { "endDim $endDim is out of bounds or less than startDim" }

    if (start == end) {
        // Single dimension selected — nothing to merge; return an independent copy.
        return CpuTensorFP32(this.shape, this.data.copyOf())
    }

    // New shape = leading dims, one merged dim, trailing dims.
    val newDimensions = mutableListOf<Int>()
    for (i in 0 until start) {
        newDimensions.add(this.shape[i])
    }
    var flattenedSize = 1
    for (i in start..end) {
        flattenedSize *= this.shape[i]
    }
    newDimensions.add(flattenedSize)
    for (i in (end + 1) until rank) {
        newDimensions.add(this.shape[i])
    }

    return CpuTensorFP32(Shape(*newDimensions.toIntArray()), this.data.copyOf())
}
372504}
0 commit comments