#include "../include/NeuralNet/classes.h"
#include "../include/console/progressbar.h"
#include <chrono>  // For duration types
#include <cmath>   // for std::exp and std::round
#include <cstdlib> // for rand() and srand()
#include <iomanip> // for setw
#include <iostream>
#include <thread>  // For threads

// Round to two decimal places (used when printing activations).
float decimal_rounder(float x) { return std::round(x * 100.0f) / 100.0f; }

// Uniform random float in [-range, range].
float get_random(float range) {
  return ((float)rand() / RAND_MAX) * range * 2 - range;
}

template <typename T>
void printInColor(const T &val, const std::string &colorCode) {
  std::cout << "\033[" << colorCode << "m" << val << "\033[0m";
}

namespace NeuralNet {

Layer::Layer(int size, int prev_layer_size, activations activation_function) {
  const float range = 0.01f;

  this->size = size;
  this->prev_layer_size = prev_layer_size;
  this->activation_function = activation_function;

  this->activation = new float[size]();
  this->z = new float[size]();
  this->bias = new float[size];
  this->weights = new float *[size];
  for (int i = 0; i < size; i++) {
    this->bias[i] = get_random(range);
    this->weights[i] = new float[prev_layer_size];
    for (int j = 0; j < prev_layer_size; j++) {
      this->weights[i][j] = get_random(range);
    }
  }
}
Layer::~Layer() {
  delete[] this->activation;
  delete[] this->z;
  delete[] this->bias;
  for (int i = 0; i < this->size; i++) {
    delete[] this->weights[i];
  }
  delete[] this->weights;
}

void Layer::sigmoid() {
  for (int i = 0; i < this->size; i++) {
    this->activation[i] = 1 / (1 + std::exp(-this->z[i]));
  }
}
void Layer::relu() {
  for (int i = 0; i < this->size; i++) {
    this->activation[i] = this->z[i] > 0 ? this->z[i] : 0;
  }
}
void Layer::softmax() {
  // Subtract the max logit before exponentiating so large z values cannot
  // overflow exp(); the normalized result is mathematically unchanged.
  float max_z = this->z[0];
  for (int i = 1; i < this->size; i++) {
    if (this->z[i] > max_z) {
      max_z = this->z[i];
    }
  }
  float sum = 0;
  for (int i = 0; i < this->size; i++) {
    this->activation[i] = std::exp(this->z[i] - max_z);
    sum += this->activation[i];
  }
  for (int i = 0; i < this->size; i++) {
    this->activation[i] /= sum;
  }
}

void Layer::forward_pass(const float *inputs) {
  for (int i = 0; i < this->size; i++) {
    // Start from the bias so repeated passes do not accumulate stale z values.
    this->z[i] = this->bias[i];
    for (int j = 0; j < this->prev_layer_size; j++) {
      this->z[i] += this->weights[i][j] * inputs[j];
    }
  }
  // Apply the activation once, after every z[i] has been computed; softmax in
  // particular needs the full vector before it can normalize.
  switch (this->activation_function) {
  case activations::relu:
    this->relu();
    break;
  case activations::sigmoid:
    this->sigmoid();
    break;
  case activations::softmax:
    this->softmax();
    break;
  }
}
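// In matrix form the pass above computes z = W x + b and then a = f(z), where
// W is (size x prev_layer_size). As an illustration (numbers are hypothetical,
// not from the code): with two neurons, two inputs, weights {{1, 2}, {3, 4}},
// bias {0.5, -0.5}, and inputs {1, 1}, the pre-activations are z = {3.5, 6.5}
// before f is applied.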

MLP::MLP(int input_layer_size, int hidden_layers_count, int *hidden_layer_sizes,
         int output_layer_size) {
  this->input_layer_size = input_layer_size;
  this->hidden_layers_count = hidden_layers_count;
  this->hidden_layer_sizes = hidden_layer_sizes;
  this->output_layer_size = output_layer_size;
  this->predictions = nullptr;
  this->layers = new Layer *[hidden_layers_count + 1]; // +1 for output layer

  // Initialize hidden layers; the first one is fed by the input layer
  for (int i = 0; i < hidden_layers_count; i++) {
    this->layers[i] =
        new Layer(hidden_layer_sizes[i],
                  (i == 0) ? input_layer_size : hidden_layer_sizes[i - 1],
                  activations::sigmoid);
  }
  // Initialize the output layer; with no hidden layers it is fed directly by
  // the input layer
  this->layers[hidden_layers_count] = new Layer(
      output_layer_size,
      (hidden_layers_count > 0) ? hidden_layer_sizes[hidden_layers_count - 1]
                                : input_layer_size,
      activations::softmax);
}
MLP::~MLP() {
  // "<=" to include the output layer; each element was allocated with new,
  // so it is released with delete, not delete[]
  for (int i = 0; i <= this->hidden_layers_count; i++) {
    delete this->layers[i];
  }
  delete[] this->layers;
}

void MLP::describe() {
  // Using ANSI escape codes to make the terminal output colorful
  printInColor("\n+-----------------------------------------+\n",
               "32"); // Green
  printInColor("|             Neural Network              |\n", "32");
  printInColor("+-----------------------------------------+\n", "32");

  std::cout << "Layer Count : " << this->hidden_layers_count + 1 << std::endl;
  printInColor("Layer Sizes: \n", "36"); // Cyan

  std::cout << std::setw(4) << this->input_layer_size << " | ";
  for (int i = 0; i < this->hidden_layers_count; i++) {
    std::cout << std::setw(4) << this->hidden_layer_sizes[i] << " | ";
    // Breaking the line every 6 entries for better readability
    if ((i + 1) % 6 == 0) {
      std::cout << std::endl;
    }
  }
  std::cout << std::setw(4) << this->output_layer_size << " | ";
  std::cout << std::endl << std::endl;
}
void MLP::print_parameters_count() {
  int weightsCount = 0;
  int prevLayerNeuronsCount = this->input_layer_size;
  int biasesCount = 0;

  for (int i = 0; i < this->hidden_layers_count; i++) {
    weightsCount += this->hidden_layer_sizes[i] * prevLayerNeuronsCount;
    prevLayerNeuronsCount = this->hidden_layer_sizes[i];
    biasesCount += this->hidden_layer_sizes[i];
  }
  // The output layer carries parameters too
  weightsCount += this->output_layer_size * prevLayerNeuronsCount;
  biasesCount += this->output_layer_size;

  std::cout << "Weights Count : " << weightsCount << std::endl;
  std::cout << "Biases Count  : " << biasesCount << std::endl;
}
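// Sanity check for the arithmetic above, with a hypothetical 2-4-4-2 network:
// 2*4 + 4*4 + 4*2 = 32 weights and 4 + 4 + 2 = 10 biases, 42 parameters total.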

void MLP::feed_forward(float *inputs) {
  this->layers[0]->forward_pass(inputs);
  const float *intermediate_activations = this->layers[0]->activation;
  for (int i = 1; i <= this->hidden_layers_count;
       i++) { // "<=" to account for the output layer
    Layer *c_layer = this->layers[i];
    c_layer->forward_pass(intermediate_activations);
    intermediate_activations = c_layer->activation;
  }
  this->predictions = intermediate_activations;
}
void MLP::predict(float **feature_samples, float **target_samples,
                  int samples_count) {
  float accuracy = 0;
  for (int j = 0; j < samples_count; j++) {
    std::cout << "Inputs : ";
    for (int i = 0; i < this->input_layer_size; i++) {
      if (i != 0) {
        std::cout << ", ";
      }
      std::cout << decimal_rounder(feature_samples[j][i]);
      if (i == this->input_layer_size - 1) {
        std::cout << std::endl;
      }
    }
    this->feed_forward(feature_samples[j]);
    const int outputSize = this->output_layer_size;
    // Find the highest output, i.e. the predicted class
    float max = this->predictions[0];
    int maxIndex = 0;
    for (int i = 1; i < outputSize; i++) {
      if (this->predictions[i] > max) {
        max = this->predictions[i];
        maxIndex = i;
      }
    }
    std::cout << "Outputs : \n";
    for (int i = 0; i < outputSize; i++) {
      const float result = decimal_rounder(this->predictions[i]);
      const char *color = "32"; // Green: predicted class, correct
      if (i != maxIndex) {
        color = "31"; // Red: not the predicted class
      } else if (target_samples[j][i] != 1) {
        color = "33"; // Yellow: predicted class, but wrong
      } else {
        accuracy += 100.0f / samples_count;
      }
      std::cout << "\033[" << color << "m" << "[" << i << "] : " << result
                << " => " << target_samples[j][i] << "\033[0m";
      std::cout << std::endl;
    }
  }
  std::cout << "Accuracy : " << accuracy << "%" << std::endl;
}

} // namespace NeuralNet
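
// A minimal usage sketch. Everything below is illustrative: the MLP_DEMO_MAIN
// guard and the toy one-hot sample are hypothetical additions, not part of the
// library, and exist only to show how the classes above fit together.
#ifdef MLP_DEMO_MAIN
int main() {
  srand(42); // fixed seed so the random weight initialization is reproducible

  int hidden_sizes[] = {4, 4};
  NeuralNet::MLP net(/*input_layer_size=*/2, /*hidden_layers_count=*/2,
                     hidden_sizes, /*output_layer_size=*/2);
  net.describe();
  net.print_parameters_count();

  // One toy sample: inputs {1, 0} with one-hot target {1, 0}. With untrained
  // weights the softmax outputs will hover around 0.5 each.
  float f0[] = {1.0f, 0.0f};
  float t0[] = {1.0f, 0.0f};
  float *features[] = {f0};
  float *targets[] = {t0};
  net.predict(features, targets, /*samples_count=*/1);
  return 0;
}
#endif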