Commit fd8a474

Add initial Gradle modules structure for reflective Ops documentation

Related-To #139
1 parent dc21310 commit fd8a474

23 files changed: 1462 additions & 1 deletion

File tree

settings.gradle.kts

Lines changed: 3 additions & 0 deletions
@@ -17,3 +17,6 @@ rootProject.name = "SKaiNET"
 
 include("skainet-lang:skainet-lang-core")
 include("skainet-lang:skainet-lang-models")
+include("skainet-lang:skainet-lang-ksp-annotations")
+include("skainet-lang:skainet-lang-ksp-processor")
+include("skainet-lang:skainet-lang-export-ops")
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
package sk.ainet.compile.nn

import sk.ainet.lang.nn.dsl.NeuralNetworkDsl
import sk.ainet.lang.nn.dsl.network
import sk.ainet.lang.nn.Module
import sk.ainet.lang.tensor.data.DenseTensorDataFactory
import sk.ainet.lang.tensor.data.TensorDataFactory
import sk.ainet.lang.types.DType

/**
 * Context for the DSL to define the data type and operations.
 *
 * This class holds the information about the data type and operations
 * that should be used in the DSL. It's used to make the DSL generic
 * and to avoid hardcoding the data type.
 *
 * @param T The default data type.
 * @param V The value type handled by the tensor operations.
 */
public interface NeuralNetworkContext<T : DType, V> {

    public val tensorDataFactory: TensorDataFactory
}

/**
 * Creates a context for the DSL with the given configuration.
 *
 * @param T The type of data processed by the modules.
 * @param init The configuration function.
 * @return The module produced by the configuration function.
 */
public fun <T : DType, V> context(init: NeuralNetworkContext<T, V>.(NeuralNetworkContext<T, V>) -> Module<T, V>): Module<T, V> {
    val instance = DefaultNetworkContext<T, V>()
    return instance.init(instance)
}

/**
 * Extension function to create a network within a NetworkContext.
 * This bridges the context wrapper with the network DSL using the context's tensor factory.
 */
public inline fun <reified T : DType, V> NeuralNetworkContext<T, V>.network(
    content: NeuralNetworkDsl<T, V>.() -> Unit
): Module<T, V> = network(tensorDataFactory, content)

public class DefaultNetworkContext<T : DType, V> : NeuralNetworkContext<T, V> {
    // Note: a fresh DenseTensorDataFactory is created on every access.
    override val tensorDataFactory: TensorDataFactory
        get() = DenseTensorDataFactory()
}
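A minimal usage sketch of this DSL, assuming a concrete DType implementation exists; FP32 here is hypothetical and not defined in this commit, and the network body is left empty because the available layer builders live in the DSL module:

// Hypothetical usage sketch: FP32 is an assumed DType implementation.
val model: Module<FP32, Float> = context<FP32, Float> {
    network {
        // layer declarations would go here on the NeuralNetworkDsl receiver
    }
}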
Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
package sk.ainet.context

import sk.ainet.lang.tensor.ops.TensorOps

public interface ExecutionContext<V> {
    public val ops: TensorOps<V>
}
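A sketch of how a backend might satisfy this interface; CpuTensorOps is a hypothetical TensorOps<Float> implementation and is not part of this commit:

// Hypothetical: CpuTensorOps is an assumed Float backend, not defined in this commit.
public class CpuExecutionContext(
    override val ops: TensorOps<Float> = CpuTensorOps()
) : ExecutionContext<Float>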
Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
plugins {
    alias(libs.plugins.kotlinMultiplatform)
    alias(libs.plugins.ksp)
}

group = "org.mikrograd.samples"

kotlin {

    compilerOptions {
        // Common compiler options applied to all Kotlin source sets
        freeCompilerArgs.add("-Xexpect-actual-classes")
        freeCompilerArgs.add("-Xmulti-platform")
    }

    jvmToolchain(17)

    jvm()

    sourceSets {
        commonMain.dependencies {
            implementation(project(":miKrograd"))
        }

        commonTest.dependencies {
            implementation(kotlin("test-common"))
            implementation(kotlin("test-annotations-common"))
        }

        val jvmMain by getting {
            kotlin.srcDir("build/generated/ksp/jvm/jvmMain/kotlin")
            dependencies {
                implementation(project(":miKrograd-annotations"))
            }
        }

        jvmTest.dependencies {
            implementation(kotlin("test-junit"))
        }
    }
}

dependencies {
    // add("kspCommonMainMetadata", project(":test-processor"))
    add("kspJvm", project(":skainet-lang:skainet-lang-ksp-processor"))
}

ksp {
    arg("ksp.verbose", "true")
}

// Add a run task for the JVM application
tasks.register<JavaExec>("runJvm") {
    group = "application"
    description = "Run the JVM application"
    classpath = files(kotlin.jvm().compilations["main"].output.allOutputs, configurations.getByName("jvmRuntimeClasspath"))
    mainClass.set("com.example.MainKt")
}

// Add a run task for the KspMain application
tasks.register<JavaExec>("runKspMain") {
    group = "application"
    description = "Run the KspMain application"
    classpath = files(kotlin.jvm().compilations["main"].output.allOutputs, configurations.getByName("jvmRuntimeClasspath"))
    mainClass.set("com.example.KspMainKt")
}
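Assuming the entry points com.example.MainKt and com.example.KspMainKt exist in this module, the registered tasks can be invoked as ./gradlew runJvm and ./gradlew runKspMain. Each task's classpath combines the main compilation output with the jvmRuntimeClasspath configuration, so the KSP-generated sources added via build/generated/ksp/jvm/jvmMain/kotlin are compiled and available at run time.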
Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
package org.mikrograd.data.generator

import kotlin.math.PI
import kotlin.math.cos
import kotlin.math.sin
import kotlin.random.Random

fun makeMoons(
    nSamples: Any = 100, // Int or Pair<Int, Int>, mirroring scikit-learn's n_samples
    shuffle: Boolean = true,
    noise: Double? = null,
    randomState: Int? = null
): Pair<Array<DoubleArray>, IntArray> {
    val (nSamplesOut, nSamplesIn) = when (nSamples) {
        is Int -> Pair(nSamples / 2, nSamples - nSamples / 2)
        is Pair<*, *> -> {
            if (nSamples.first is Int && nSamples.second is Int) {
                Pair(nSamples.first as Int, nSamples.second as Int)
            } else {
                throw IllegalArgumentException("`n_samples` can be either an int or a two-element tuple.")
            }
        }
        else -> throw IllegalArgumentException("`n_samples` can be either an int or a two-element tuple.")
    }

    val generator = randomState?.let { Random(it) } ?: Random.Default

    // Outer moon: upper half-circle; inner moon: lower half-circle, shifted.
    val outerCircX = DoubleArray(nSamplesOut) { cos(it * PI / nSamplesOut) }
    val outerCircY = DoubleArray(nSamplesOut) { sin(it * PI / nSamplesOut) }
    val innerCircX = DoubleArray(nSamplesIn) { 1 - cos(it * PI / nSamplesIn) }
    val innerCircY = DoubleArray(nSamplesIn) { 1 - sin(it * PI / nSamplesIn) - 0.5 }

    val X = Array(nSamplesOut + nSamplesIn) { DoubleArray(2) }
    for (i in 0 until nSamplesOut) {
        X[i][0] = outerCircX[i]
        X[i][1] = outerCircY[i]
    }
    for (i in 0 until nSamplesIn) {
        X[nSamplesOut + i][0] = innerCircX[i]
        X[nSamplesOut + i][1] = innerCircY[i]
    }

    // Labels: 0 for the outer moon (IntArray default), 1 for the inner moon.
    val y = IntArray(nSamplesOut + nSamplesIn)
    for (i in 0 until nSamplesIn) {
        y[nSamplesOut + i] = 1
    }

    if (shuffle) {
        val indices = X.indices.toList().shuffled(generator)
        val XShuffled = Array(X.size) { DoubleArray(2) }
        val yShuffled = IntArray(y.size)
        for (i in indices.indices) {
            XShuffled[i] = X[indices[i]]
            yShuffled[i] = y[indices[i]]
        }
        X.indices.forEach { X[it] = XShuffled[it] }
        y.indices.forEach { y[it] = yShuffled[it] }
    }

    noise?.let { n ->
        for (i in X.indices) {
            X[i][0] += generator.nextDouble(-n, n)
            X[i][1] += generator.nextDouble(-n, n)
        }
    }

    return Pair(X, y)
}
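A brief usage sketch of the generator defined above:

// Generate 200 two-moon samples with mild noise and a fixed seed.
val (X, y) = makeMoons(nSamples = 200, noise = 0.1, randomState = 42)
println("first sample: (${X[0][0]}, ${X[0][1]}) -> class ${y[0]}")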
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
package org.mikrograd.samples

/*
import org.mikrograd.diff.MLP
import org.mikrograd.diff.Value
import kotlin.random.Random

class MLPClustering(private val data: Pair<Array<DoubleArray>, IntArray>, val model: MLP) {
    private val X: Array<DoubleArray> = data.first
    private val y: IntArray = data.second

    fun loss(batchSize: Int? = null): Pair<Value, Double> {
        val (Xb, yb) = if (batchSize == null) {
            Pair(X.toList(), y.toList())
        } else {
            val ri = List(batchSize) { Random.nextInt(X.size) }
            Pair(ri.map { X[it] }, ri.map { y[it] })
        }
        val inputs: List<List<Value>> = Xb.map { xrow -> xrow.map { Value(it) } }

        val scores: List<Value> = inputs.flatMap { input -> model.invoke(input) }

        // Max-margin hinge loss, as in the Python reference:
        // losses = [(1 + -yi*scorei).relu() for yi, scorei in zip(yb, scores)]
        val losses: List<Value> = yb.zip(scores).map { (yi, scorei) -> Value(1 + -yi * scorei.data).relu() }
        val lossesSum: Value = losses.fold(Value(0.0)) { a, i -> a + i }

        val dataLoss: Value = lossesSum / losses.size

        // Regularization term; note this multiplies the parameters together,
        // whereas the Python reference below sums their squares.
        val alpha = 1e-4
        val regLoss: Value = alpha * (model.parameters().reduce { a, i -> a * i })
        // val reg_loss = alpha * sum((p*p for p in model.parameters()))
        val totalLoss = dataLoss + regLoss

        val accuracy = yb.zip(scores).count { (yi, scorei) -> (yi > 0) == (scorei.data > 0) }.toDouble() / yb.size

        return Pair(totalLoss, accuracy)
    }
}
*/
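The regLoss line above diverges from the Python reference quoted next to it: it multiplies the parameters together, while the reference sums their squares. A sketch of the sum-of-squares form, assuming the Value arithmetic operators used elsewhere in this file:

// Hedged sketch: L2 penalty matching the Python comment.
val regLossL2: Value = alpha * model.parameters()
    .map { p -> p * p }        // square each parameter
    .reduce { a, b -> a + b }  // sum of squares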
Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
package org.mikrograd.samples

/*
import org.mikrograd.diff.MLP
import org.mikrograd.diff.Value
import org.mikrograd.utils.drawDot

fun loss(X: Array<DoubleArray>, y: DoubleArray, model: MLP): Pair<Value, Double> {
    val inputs: List<List<Value>> = X.mapIndexed { index, xrow -> xrow.map { Value(it, label = "in$index") } }
    val scores: List<Value> = inputs.map { input -> model(input).first() }

    // sum of squared errors over the dataset
    val values: List<Value> =
        y.zip(scores) { actual: Double, predicted: Value -> (actual - predicted).pow(2.0) }
    val mse = values.fold(Value(0.0)) { acc, value -> acc + value }

    return Pair(mse, 0.0)
}

fun main() {

    val c = Value(3.0, label = "a") + Value(2.0, label = "b")
    val cGr = drawDot(c)
    cGr.toFile("a+b.dot")

    val d = Value(3.0, label = "a") * Value(2.0, label = "b")
    val dGr = drawDot(d)
    dGr.toFile("a*b.dot")

    val model = MLP(1, listOf(1, 1, 1)) // tiny MLP with three size-1 layers
    val (X, y) = Pair<Array<DoubleArray>, DoubleArray>(
        arrayOf(doubleArrayOf(1.0)),
        doubleArrayOf(2.0)
    )

    val X_v: List<List<Value>> = X.map { xrow -> xrow.map { Value(it) } }
    val prediction = model.invoke(X_v[0])[0]

    val modelGr = drawDot(prediction)
    modelGr.toFile("model.dot")

    val (loss, _) = loss(X, y, model)

    val lossGr = drawDot(loss)
    lossGr.toFile("loss.dot")

    model.zeroGrad()
    loss.backward()

    val backGr = drawDot(loss, true)
    backGr.toFile("back.dot")
}
*/
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
package org.mikrograd.samples

/*
import org.mikrograd.diff.Neuron
import org.mikrograd.diff.Value
import org.mikrograd.utils.drawDot

fun main() {
    val neuralNetwork = Neuron(2)
    // parameters
    val x = listOf(Value(1.0), Value(-2.0))
    val y = neuralNetwork(x)
    drawDot(y).toFile("neuron.png")
}
*/
Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
package org.mikrograd.samples

import org.mikrograd.diff.MLP
import org.mikrograd.diff.Value
import kotlin.math.PI
import kotlin.math.sin

fun train(sine: MLP) {

    // 100 evenly spaced sample points in [0, PI/2]
    // val X = listOf(0.0, PI / 2, PI)
    val X = List(100) { index ->
        (index / (100 - 1).toDouble()) * (PI / 2)
    }

    val y = X.mapIndexed { index, value -> Value(sin(value)).also { it.label = "y$index" } }

    (1..100).forEach {
        // forward propagation
        val ypred: List<List<Value>> = X.mapIndexed { index, x ->
            sine.invoke(listOf(x)) // .also { it[0].label = "y_pred$index" }
        }
        // calculate loss (sum of squared errors)
        val loss: Value = y.zip(ypred) { ygt, yout -> (ygt - yout[0]).pow(2.0) }.reduce { acc, v -> acc + v }
        // reset gradients
        sine.parameters().forEach { param ->
            param.grad = 0.0
        }
        // calc gradients in backpropagation
        loss.backward()

        // update weights and biases with a learning rate
        sine.parameters().forEach { param ->
            param.data += -0.1 * param.grad
        }

        println(loss.data)
    }
}

fun MLP.nsin(d: Double) = invoke(listOf(d))[0].data

fun main() {
    val sine = MLP(1, listOf(16, 16, 1))

    println(sine.nsin(PI / 2))
    train(sine)
    println(sine.nsin(PI / 2))
}
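The update step is plain stochastic gradient descent: loss.backward() populates each parameter's grad, then param.data += -0.1 * param.grad moves every weight and bias against its gradient with a fixed learning rate of 0.1. The two println(sine.nsin(PI / 2)) calls in main should therefore bracket a visible move toward sin(PI / 2) = 1.0.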