Commit 29b1245

Implement simple utility for tensors printing
1 parent 565c2c2 commit 29b1245

6 files changed

Lines changed: 556 additions & 13 deletions

File tree

docs/modules/getting-started/nav.adoc

Lines changed: 2 additions & 0 deletions
@@ -3,7 +3,9 @@
 * xref:index.adoc[Introduction]
 * xref:installation.adoc[Installation & Setup]
 * xref:basic-tensors.adoc[Basic Tensor Operations]
+* xref:tensor-operators.adoc[Tensor Operators]
 * xref:matrix-operations.adoc[Matrix Operations]
 * xref:neural-network-basics.adoc[Neural Network Basics]
+* xref:neural-network-api.adoc[Neural Network API]
 * xref:data-processing.adoc[Data Processing Use Cases]
 * xref:performance-tips.adoc[Performance Tips]
Lines changed: 381 additions & 0 deletions
@@ -0,0 +1,381 @@
= Tensor Operators
:toc: left
:toclevels: 3
:sectanchors:
:sectlinks:

Master the complete set of tensor operations available in SKaiNET's TensorOps API for mathematical computations and neural network operations.
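
This commit's headline change is a small `print()` utility used throughout the examples below to render tensors as nested lists. The real implementation lives in the library; purely for intuition, here is a stand-alone sketch of what such a formatter can look like for a 2D tensor over a flat row-major array (the `formatMatrix` name and explicit `rows`/`cols` parameters are illustrative, not SKaiNET API):

[source,kotlin]
----
// Hypothetical formatter mirroring what a tensor print() utility does:
// renders a rows x cols row-major array as nested lists.
fun formatMatrix(data: FloatArray, rows: Int, cols: Int): String =
    (0 until rows).joinToString(prefix = "[", postfix = "]") { r ->
        (0 until cols).joinToString(prefix = "[", postfix = "]") { c ->
            data[r * cols + c].toString()
        }
    }

fun main() {
    println(formatMatrix(floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f), 2, 3))
    // [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
}
----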

== Element-wise Operations

SKaiNET provides comprehensive element-wise operations between tensors and scalars.

=== Tensor-Tensor Operations

Perform element-wise operations between tensors of compatible shapes:

[source,kotlin]
----
val backend = CpuBackend()

// Create sample tensors
val tensorA = CpuTensorFP32.fromArray(
    Shape(2, 3),
    floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f)
)

val tensorB = CpuTensorFP32.fromArray(
    Shape(2, 3),
    floatArrayOf(2f, 3f, 4f, 5f, 6f, 7f)
)

with(backend) {
    // Element-wise addition
    val sum = tensorA + tensorB
    println("A + B = ${sum.print()}")
    // Output: [[3, 5, 7], [9, 11, 13]]

    // Element-wise subtraction
    val diff = tensorA - tensorB
    println("A - B = ${diff.print()}")
    // Output: [[-1, -1, -1], [-1, -1, -1]]

    // Element-wise multiplication
    val product = tensorA * tensorB
    println("A * B = ${product.print()}")
    // Output: [[2, 6, 12], [20, 30, 42]]

    // Element-wise division
    val quotient = tensorA / tensorB
    println("A / B = ${quotient.print()}")
    // Output: [[0.5, 0.67, 0.75], [0.8, 0.83, 0.86]]
}
----

=== Tensor-Scalar Operations

Apply scalar operations to entire tensors:

[source,kotlin]
----
val backend = CpuBackend()
val tensor = CpuTensorFP32.fromArray(
    Shape(2, 2),
    floatArrayOf(1f, 2f, 3f, 4f)
)

with(backend) {
    // Scalar addition
    val added = tensor + 10f
    println("Tensor + 10 = ${added.print()}")
    // Output: [[11, 12], [13, 14]]

    // Scalar multiplication
    val scaled = tensor * 2.5f
    println("Tensor * 2.5 = ${scaled.print()}")
    // Output: [[2.5, 5.0], [7.5, 10.0]]

    // Works with Int, Float, and Double
    val intAdded = tensor + 5
    val doubleScaled = tensor * 3.14

    // Scalar-tensor operations (commutative)
    val scaledCommutative = 2f * tensor
    println("2 * Tensor = ${scaledCommutative.print()}")
}
----

== Matrix Operations

=== Matrix Multiplication

The fundamental linear algebra operation for neural networks:

[source,kotlin]
----
val backend = CpuBackend()

val A = CpuTensorFP32.fromArray(
    Shape(2, 3),
    floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f)
)

val B = CpuTensorFP32.fromArray(
    Shape(3, 2),
    floatArrayOf(7f, 8f, 9f, 10f, 11f, 12f)
)

// Matrix multiplication
val C = backend.matmul(A, B)
println("A @ B = ${C.print()}")
// Output: [[58, 64], [139, 154]]
----
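
To see where those numbers come from: the entry at row i, column j is the dot product of row i of A with column j of B, e.g. 1*7 + 2*9 + 3*11 = 58. The textbook triple loop below reproduces the result in plain Kotlin (a dependency-free sketch, not the SKaiNET implementation):

[source,kotlin]
----
// Textbook O(n^3) matrix multiplication over row-major FloatArrays:
// a is n x k, b is k x m, result is n x m.
fun naiveMatmul(a: FloatArray, b: FloatArray, n: Int, k: Int, m: Int): FloatArray {
    val out = FloatArray(n * m)
    for (i in 0 until n)
        for (j in 0 until m)
            for (p in 0 until k)
                out[i * m + j] += a[i * k + p] * b[p * m + j]
    return out
}

fun main() {
    val a = floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f)    // 2x3
    val b = floatArrayOf(7f, 8f, 9f, 10f, 11f, 12f) // 3x2
    println(naiveMatmul(a, b, 2, 3, 2).toList())
    // [58.0, 64.0, 139.0, 154.0] -> [[58, 64], [139, 154]]
}
----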

=== Transpose Operation

Transpose tensors along their last two dimensions:

[source,kotlin]
----
val backend = CpuBackend()
val matrix = CpuTensorFP32.fromArray(
    Shape(2, 3),
    floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f)
)

with(backend) {
    val transposed = matrix.t()
    println("Original: ${matrix.print()}")
    // Output: [[1, 2, 3], [4, 5, 6]]

    println("Transposed: ${transposed.print()}")
    // Output: [[1, 4], [2, 5], [3, 6]]
}
----
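
Conceptually, transposition swaps the two trailing indices: element (i, j) becomes (j, i). For a 2D tensor in flat row-major storage, that is the remapping below (plain Kotlin, for illustration only, not the SKaiNET implementation):

[source,kotlin]
----
// Transpose a rows x cols row-major matrix by swapping indices.
fun transpose(data: FloatArray, rows: Int, cols: Int): FloatArray {
    val out = FloatArray(data.size)
    for (i in 0 until rows)
        for (j in 0 until cols)
            out[j * rows + i] = data[i * cols + j]
    return out
}

fun main() {
    val m = floatArrayOf(1f, 2f, 3f, 4f, 5f, 6f) // [[1, 2, 3], [4, 5, 6]]
    println(transpose(m, 2, 3).toList())
    // [1.0, 4.0, 2.0, 5.0, 3.0, 6.0] -> [[1, 4], [2, 5], [3, 6]]
}
----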

=== Dot Product and Scaling

[source,kotlin]
----
val backend = CpuBackend()
val vectorA = CpuTensorFP32.fromArray(Shape(3), floatArrayOf(1f, 2f, 3f))
val vectorB = CpuTensorFP32.fromArray(Shape(3), floatArrayOf(4f, 5f, 6f))

// Dot product
val dotResult = backend.dot(vectorA, vectorB)
println("Dot product: $dotResult") // 32.0 (1*4 + 2*5 + 3*6)

// Scale tensor by scalar
val scaled = backend.scale(vectorA, 2.5)
println("Scaled: ${scaled.print()}") // [2.5, 5.0, 7.5]
----

== Activation Functions

Essential non-linear functions for neural networks.

=== ReLU Activation

Rectified Linear Unit, the most common activation function:

[source,kotlin]
----
val backend = CpuBackend()
val input = CpuTensorFP32.fromArray(
    Shape(5),
    floatArrayOf(-2f, -1f, 0f, 1f, 2f)
)

with(backend) {
    val activated = input.relu()
    println("Input: ${input.print()}")
    println("ReLU: ${activated.print()}")
    // Output: [0, 0, 0, 1, 2]
}
----

=== Sigmoid Activation

Sigmoid function for probability outputs:

[source,kotlin]
----
val backend = CpuBackend()
val input = CpuTensorFP32.fromArray(
    Shape(3),
    floatArrayOf(-1f, 0f, 1f)
)

with(backend) {
    val activated = input.sigmoid()
    println("Input: ${input.print()}")
    println("Sigmoid: ${activated.print()}")
    // Output: [0.269, 0.5, 0.731]
}
----
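
The printed values follow directly from the definition sigmoid(x) = 1 / (1 + e^-x^); a quick stand-alone check in plain Kotlin, no SKaiNET required:

[source,kotlin]
----
import kotlin.math.exp

fun sigmoid(x: Float): Float = 1f / (1f + exp(-x))

fun main() {
    println(listOf(-1f, 0f, 1f).map { sigmoid(it) })
    // [0.26894143, 0.5, 0.7310586] -> rounds to [0.269, 0.5, 0.731]
}
----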

=== Tanh Activation

Hyperbolic tangent activation:

[source,kotlin]
----
val backend = CpuBackend()
val input = CpuTensorFP32.fromArray(
    Shape(3),
    floatArrayOf(-1f, 0f, 1f)
)

with(backend) {
    val activated = input.tanh()
    println("Input: ${input.print()}")
    println("Tanh: ${activated.print()}")
    // Output: [-0.762, 0.0, 0.762]
}
----

=== Softmax Activation

Softmax for multi-class classification:

[source,kotlin]
----
val backend = CpuBackend()
val logits = CpuTensorFP32.fromArray(
    Shape(2, 3), // Batch size 2, 3 classes
    floatArrayOf(1f, 2f, 3f, 0.5f, 1.5f, 2.5f)
)

with(backend) {
    // Apply softmax along dimension 1 (classes)
    val probabilities = logits.softmax(dimension = 1)
    println("Logits: ${logits.print()}")
    println("Softmax: ${probabilities.print()}")
    // Each row sums to 1.0
}
----
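
Softmax exponentiates each logit and divides by the row sum; implementations usually subtract the row maximum first so the exponentials cannot overflow. A stand-alone per-row sketch in plain Kotlin (illustrative, not the SKaiNET code):

[source,kotlin]
----
import kotlin.math.exp

// Numerically stable softmax over one row of logits.
fun softmaxRow(logits: FloatArray): FloatArray {
    val max = logits.max() // subtract the max so exp() cannot overflow
    val exps = FloatArray(logits.size) { exp(logits[it] - max) }
    val sum = exps.sum()
    return FloatArray(exps.size) { exps[it] / sum }
}

fun main() {
    println(softmaxRow(floatArrayOf(1f, 2f, 3f)).toList())
    // ~[0.090, 0.245, 0.665]; the row sums to 1.0
}
----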

== Tensor Reshaping

=== Flatten Operation

Convert multi-dimensional tensors to 1D or flatten specific dimensions:

[source,kotlin]
----
val backend = CpuBackend()
val tensor3D = CpuTensorFP32.fromArray(
    Shape(2, 3, 4),
    FloatArray(24) { it.toFloat() }
)

with(backend) {
    // Flatten all dimensions
    val flattened = tensor3D.flatten()
    println("Original shape: ${tensor3D.shape}") // Shape(2, 3, 4)
    println("Flattened shape: ${flattened.shape}") // Shape(24)

    // Flatten from dimension 1 onwards (keep batch dimension)
    val batchFlattened = tensor3D.flatten(startDim = 1)
    println("Batch flattened shape: ${batchFlattened.shape}") // Shape(2, 12)

    // Flatten specific range of dimensions
    val partialFlattened = tensor3D.flatten(startDim = 1, endDim = 2)
    println("Partial flattened shape: ${partialFlattened.shape}") // Shape(2, 12)
}
----
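
Flattening only rewrites the shape: the dimensions from `startDim` through `endDim` collapse into their product, and the underlying data is untouched. The shape arithmetic, as a small plain-Kotlin sketch (a hypothetical `flattenShape` helper, not SKaiNET API):

[source,kotlin]
----
// Collapse dims[startDim..endDim] into their product; data layout is unchanged.
fun flattenShape(dims: List<Int>, startDim: Int = 0, endDim: Int = dims.lastIndex): List<Int> =
    dims.take(startDim) +
        dims.subList(startDim, endDim + 1).reduce(Int::times) +
        dims.drop(endDim + 1)

fun main() {
    println(flattenShape(listOf(2, 3, 4)))                           // [24]
    println(flattenShape(listOf(2, 3, 4), startDim = 1))             // [2, 12]
    println(flattenShape(listOf(2, 3, 4), startDim = 1, endDim = 2)) // [2, 12]
}
----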

== Practical Examples

=== Neural Network Forward Pass

Combine multiple operators for a complete neural network layer:

[source,kotlin]
----
val backend = CpuBackend()
val rng = java.util.Random() // Gaussian sampling via java.util.Random.nextGaussian()

// Input batch: 32 samples, 784 features (28x28 images)
val input = CpuTensorFP32.fromArray(
    Shape(32, 784),
    FloatArray(32 * 784) { kotlin.random.Random.nextFloat() }
)

// Layer weights and bias
val weights = CpuTensorFP32.fromArray(
    Shape(128, 784),
    FloatArray(128 * 784) { rng.nextGaussian().toFloat() * 0.1f }
)
val bias = CpuTensorFP32.fromArray(
    Shape(128),
    FloatArray(128) { 0f }
)

with(backend) {
    // Linear transformation: x @ W^T + b
    val linearOutput = matmul(input, weights.t()) + bias

    // Apply ReLU activation
    val activated = linearOutput.relu()

    // Apply dropout simulation (multiply by 0.8)
    val dropped = activated * 0.8f

    println("Input shape: ${input.shape}")    // Shape(32, 784)
    println("Output shape: ${dropped.shape}") // Shape(32, 128)
}
----

=== Image Processing Pipeline

[source,kotlin]
----
val backend = CpuBackend()

// RGB image: 224x224x3
val image = CpuTensorFP32.fromArray(
    Shape(224, 224, 3),
    FloatArray(224 * 224 * 3) { kotlin.random.Random.nextFloat() * 255f }
)

with(backend) {
    // Normalize to [0, 1]
    val normalized = image / 255f

    // Apply mean subtraction (ImageNet means)
    val meanSubtracted = normalized - CpuTensorFP32.fromArray(
        Shape(3),
        floatArrayOf(0.485f, 0.456f, 0.406f)
    )

    // Flatten for fully connected layer
    val flattened = meanSubtracted.flatten()

    println("Original shape: ${image.shape}")      // Shape(224, 224, 3)
    println("Processed shape: ${flattened.shape}") // Shape(150528)
}
----

== Performance Tips

=== Operator Chaining

Chain operations inside a single backend context:

[source,kotlin]
----
val backend = CpuBackend()
val input = CpuTensorFP32.fromArray(Shape(100, 50), FloatArray(5000) { it.toFloat() })

with(backend) {
    val result = input
        .relu()                 // Apply activation
        .t()                    // Transpose
        .softmax(dimension = 0) // Normalize along first dimension

    // Chaining inside one with(backend) block avoids re-entering
    // the backend context for every intermediate step
}
----

=== Memory Considerations

Be mindful of tensor shapes and memory usage:

[source,kotlin]
----
val backend = CpuBackend()

// Large tensors - be careful with memory
val largeTensor = CpuTensorFP32.fromArray(
    Shape(1000, 1000),
    FloatArray(1_000_000) { it.toFloat() }
)

// Operations create new tensors - manage memory accordingly
with(backend) {
    val processed = largeTensor
        .relu()     // Creates a new tensor
        .softmax(1) // Creates another new tensor

    // The original largeTensor still exists in memory
}
----
