Skip to content

Commit f912578

Browse files
feat(backpropagation): implement hidden-layer gradient propagation with weight and bias updates
1 parent deee8a0 commit f912578

1 file changed

Lines changed: 45 additions & 6 deletions

File tree

try1 (OOP Approach)/typescript/NeuralNet.ts

Lines changed: 45 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ class hidden_layer_neuron {
44
activation: number;
55
weights_prev_layer: number[];
66
bias: number;
7+
z: number;
78
constructor(prev_layer_neurons: number) {
89
this.activation = 0;
910
this.weights_prev_layer = Array.from({ length: prev_layer_neurons }, () =>
@@ -17,6 +18,7 @@ class hidden_layer_neuron {
1718
z += x * this.weights_prev_layer[i];
1819
});
1920
z += this.bias;
21+
this.z = z;
2022
this.activation = sigmoid(z);
2123
return this.activation;
2224
}
@@ -145,13 +147,50 @@ export class MLP {
145147
/**
 * One step of stochastic gradient descent: forward-propagate `input`,
 * then back-propagate the error against `target` and update every
 * weight and bias in place.
 *
 * @param input  network input activations (length = input layer size)
 * @param target expected outputs (length = output layer size)
 * @param l_rate learning rate (step size for gradient descent)
 */
backpropogate(input: number[], target: number[], l_rate: number) {
  this.forward_propogation(input);

  // ---- Phase 1: compute ALL deltas before touching any weights. ----
  // BUG FIX: the original updated the output-layer weights first and then
  // read those already-updated weights (`n_next.weights_prev_layer[i]`)
  // while back-propagating error into the hidden layers. Deltas must be
  // computed from the pre-update weights.

  // Output delta = (prediction - target). This is the simplified combined
  // derivative for sigmoid/softmax output paired with cross-entropy loss
  // — NOTE(review): confirm this matches the loss actually used in training.
  const output_layer_deltas = this.output_layer.neurons.map(
    (_neuron, i) => this.predictions[i] - target[i],
  );

  // hidden_layer_deltas[k][i] = dL/dz for neuron i of hidden layer k.
  const hidden_layer_deltas: number[][] = [];
  let next_layer_deltas = output_layer_deltas;
  for (let layer_idx = this.hidden_layers.length - 1; layer_idx >= 0; layer_idx--) {
    const next_layer =
      layer_idx === this.hidden_layers.length - 1
        ? this.output_layer
        : this.hidden_layers[layer_idx + 1];

    const deltas = this.hidden_layers[layer_idx].neurons.map((n, i) => {
      // Error flowing back into neuron i: sum over next-layer deltas,
      // weighted by the connection FROM this neuron (index i in each
      // next-layer neuron's weights_prev_layer).
      let error_sum = 0;
      next_layer.neurons.forEach((n_next, j) => {
        error_sum += next_layer_deltas[j] * n_next.weights_prev_layer[i];
      });
      // BUG FIX: the forward pass computes `this.activation = sigmoid(z)`,
      // so the derivative is sigmoid'(z) = a * (1 - a) — NOT the ReLU step
      // function (z > 0 ? 1 : 0) the original used here.
      return n.activation * (1 - n.activation) * error_sum;
    });

    hidden_layer_deltas[layer_idx] = deltas;
    next_layer_deltas = deltas;
  }

  // ---- Phase 2: apply the gradient-descent updates. ----

  // Output layer: w_ij <- w_ij - lr * a_prev_j * delta_i, b_i <- b_i - lr * delta_i.
  const a_prev_out = this.get_last_hidden_layer_activations();
  this.output_layer.neurons.forEach((neuron, i) => {
    neuron.weights_prev_layer = neuron.weights_prev_layer.map(
      (w, j) => w - l_rate * a_prev_out[j] * output_layer_deltas[i],
    );
    neuron.bias -= l_rate * output_layer_deltas[i];
  });

  // Hidden layers: the activations feeding layer 0 are the raw network
  // input; every later layer is fed by the previous hidden layer.
  this.hidden_layers.forEach((layer, layer_idx) => {
    const a_prev =
      layer_idx === 0
        ? input
        : this.hidden_layers[layer_idx - 1].neurons.map((n) => n.activation);

    layer.neurons.forEach((n, i) => {
      const delta = hidden_layer_deltas[layer_idx][i];
      n.weights_prev_layer = n.weights_prev_layer.map(
        (w, j) => w - l_rate * a_prev[j] * delta,
      );
      n.bias -= l_rate * delta;
    });
  });
}
157196
}

0 commit comments

Comments
 (0)