@@ -112,11 +112,34 @@ export class MLP {
   forward_propogation(input: number[]) {
     this.hidden_layers.forEach((layer) => layer.forward(input));
 
-    let last_hidden_layer_activations =
-      this.hidden_layers[
-        this.hidden_layers.length - 1
-      ].get_last_layer_activations();
-    this.output_layer.calculate_activation(last_hidden_layer_activations);
-    return this.output_layer.neurons.map((n) => n.activation);
+    this.output_layer.calculate_activation(this.get_last_layer_activations());
+
+    this.predictions = this.output_layer.neurons.map((n) => n.activation);
+    return this.predictions;
+  }
+
+  loss(target: number[]) {
+    // NOTE: Assumes forward propagation has already been run
+    // NOTE: Assumes the target is one-hot encoded
+
+    // Cross-entropy loss: -sum_i y_i * log(p_i)
+    let loss = 0;
+    target.forEach((y, i) => {
+      loss -= y * Math.log(this.output_layer.neurons[i].activation);
+    });
+    return loss;
+  }
+
+  // Single-sample weight adjustment
+  backpropogate(input: number[], target: number[], l_rate: number) {
+    // Output-layer weight adjustments
+    for (const [j, neuron] of this.output_layer.neurons.entries()) {
+      // Softmax + cross-entropy gradient: dL/dz_j = p_j - y_j
+      const error = this.predictions[j] - target[j];
+      for (const [i, x] of this.get_last_layer_activations().entries()) {
+        // Gradient descent: step against the gradient, writing back into the array
+        neuron.weights_prev_layer[i] -= l_rate * x * error;
+      }
+    }
   }
 }
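For reference, the `p_j - y_j` term in `backpropogate` is the standard shortcut gradient for cross-entropy loss over softmax outputs; this assumes `calculate_activation` applies a softmax, which is not shown in this diff. A minimal derivation sketch:

```latex
% Cross-entropy loss over softmax outputs p_j with one-hot targets y_j:
%   L = -\sum_j y_j \log p_j, \qquad p_j = e^{z_j} / \textstyle\sum_k e^{z_k}
% Differentiating L through the softmax collapses to:
\frac{\partial L}{\partial z_j} = p_j - y_j
% and for a weight w_{ji} from previous-layer activation x_i:
\frac{\partial L}{\partial w_{ji}} = x_i \, (p_j - y_j)
```

Gradient descent then subtracts `l_rate * x_i * (p_j - y_j)` from each weight, which is exactly the inner-loop update above.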
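A hypothetical usage sketch for the methods added in this commit, assuming an `MLP` constructor (not part of this diff) and treating the input and target shapes as placeholders:

```typescript
// Hypothetical single-sample training loop for the methods in this commit.
// The MLP constructor signature is an assumption; it is not shown in this diff.
const mlp = new MLP(/* layer sizes, activation config: not shown here */);

const input = [0.5, 0.1, 0.9]; // example feature vector (shape assumed)
const target = [0, 1, 0];      // one-hot encoded class label
const l_rate = 0.01;

for (let epoch = 0; epoch < 100; epoch++) {
  mlp.forward_propogation(input);           // caches mlp.predictions
  const loss = mlp.loss(target);            // cross-entropy vs. the one-hot target
  mlp.backpropogate(input, target, l_rate); // adjusts output-layer weights only
  if (epoch % 10 === 0) console.log(`epoch ${epoch}: loss ${loss.toFixed(4)}`);
}
```

Note that `backpropogate` currently updates only the output-layer weights; hidden-layer weights are untouched, so a loop like this would plateau once those fixed weights limit the fit.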