Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ add_subdirectory(graphics)
# NOTE(review): advance_maths moved before greedy_algorithms to keep the
# subdirectory list alphabetized as far as visible; ideally it belongs even
# earlier in the full list (before graphics).
add_subdirectory(advance_maths)
add_subdirectory(greedy_algorithms)
add_subdirectory(hashing)
add_subdirectory(machine_learning)
add_subdirectory(math)
add_subdirectory(numerical_methods)
add_subdirectory(operations_on_datastructures)
Expand Down
11 changes: 11 additions & 0 deletions advance_maths/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Build one standalone executable per .cpp file in this directory.
file( GLOB APP_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp )
foreach( testsourcefile ${APP_SOURCES} )
# The target name is the source file name with the .cpp extension stripped.
string( REPLACE ".cpp" "" testname ${testsourcefile} )
add_executable( ${testname} ${testsourcefile} )

set_target_properties(${testname} PROPERTIES LINKER_LANGUAGE CXX)
# Link OpenMP when the top-level project found it (matches the convention
# used by the sibling subdirectories).
if(OpenMP_CXX_FOUND)
target_link_libraries(${testname} OpenMP::OpenMP_CXX)
endif()
install(TARGETS ${testname} DESTINATION "bin/advance_maths")
endforeach( testsourcefile ${APP_SOURCES} )
67 changes: 67 additions & 0 deletions advance_maths/calculus.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
/**
* @file
* @brief Basic and intermediate calculus utilities.
*/
#include <cassert>
#include <cmath>
#include <functional>
#include <iostream>
#include <vector>

namespace advance_maths::calculus {
using Vector = std::vector<double>;

/// First derivative of @p f at @p x via the symmetric difference quotient.
/// @param h finite-difference step size.
double derivative(const std::function<double(double)>& f, double x, double h = 1e-5) {
    const double right = f(x + h);
    const double left = f(x - h);
    return (right - left) / (2.0 * h);
}

/// Second derivative of @p f at @p x via the three-point central stencil.
double second_derivative(const std::function<double(double)>& f, double x, double h = 1e-5) {
    const double stencil = f(x + h) - 2.0 * f(x) + f(x - h);
    return stencil / (h * h);
}

/// Numerical df/dx of a bivariate function, holding y fixed.
double partial_derivative_x(const std::function<double(double, double)>& f, double x, double y,
                            double h = 1e-5) {
    const double right = f(x + h, y);
    const double left = f(x - h, y);
    return (right - left) / (2.0 * h);
}

/// Numerical df/dy of a bivariate function, holding x fixed.
double partial_derivative_y(const std::function<double(double, double)>& f, double x, double y,
                            double h = 1e-5) {
    const double above = f(x, y + h);
    const double below = f(x, y - h);
    return (above - below) / (2.0 * h);
}

/// Numerical gradient {df/dx, df/dy} of a bivariate function at (x, y).
Vector gradient_2d(const std::function<double(double, double)>& f, double x, double y) {
    Vector grad;
    grad.push_back(partial_derivative_x(f, x, y));
    grad.push_back(partial_derivative_y(f, x, y));
    return grad;
}

/// Composition rule: d f(g(x)) / dx = (df/dg) * (dg/dx).
double chain_rule(double df_dg, double dg_dx) {
    return df_dg * dg_dx;
}

/// Gradient of an MSE loss w.r.t. the single weight of the model y = w * x.
double backprop_single_weight(double prediction, double target, double input) {
    const double loss_grad = 2.0 * (prediction - target);  // dL/dy for (y - t)^2
    return chain_rule(loss_grad, input);                   // dy/dw = x for y = w * x
}
}  // namespace advance_maths::calculus

static void test() {
using namespace advance_maths::calculus;

auto f = [](double x) { return x * x * x; };
assert(std::abs(derivative(f, 2.0) - 12.0) < 1e-3);
assert(std::abs(second_derivative(f, 2.0) - 12.0) < 1e-2);

auto g = [](double x, double y) { return x * x + 3.0 * x * y + y * y; };
assert(std::abs(partial_derivative_x(g, 1.0, 2.0) - 8.0) < 1e-3);
assert(std::abs(partial_derivative_y(g, 1.0, 2.0) - 7.0) < 1e-3);

auto grad = gradient_2d(g, 1.0, 2.0);
assert(std::abs(grad[0] - 8.0) < 1e-3 && std::abs(grad[1] - 7.0) < 1e-3);

double dldw = backprop_single_weight(3.0, 1.0, 2.0);
assert(std::abs(dldw - 8.0) < 1e-9);
}

/// Entry point: runs the self-tests, then reports success on stdout.
int main() {
test();
std::cout << "Calculus module passed.\n";
return 0;
}
92 changes: 92 additions & 0 deletions advance_maths/geometry.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
/**
* @file
* @brief Geometry-related similarity and projection utilities.
*/
#include <cassert>
#include <cmath>
#include <iostream>
#include <set>
#include <stdexcept>
#include <vector>

namespace advance_maths::geometry {
using Vector = std::vector<double>;

/// Inner product of two equally-sized vectors.
/// @throws std::invalid_argument when the sizes differ.
double dot(const Vector& a, const Vector& b) {
    if (a.size() != b.size()) {
        throw std::invalid_argument("Vectors must have the same size.");
    }
    double acc = 0.0;
    for (Vector::size_type i = 0; i != a.size(); ++i) {
        acc += a[i] * b[i];
    }
    return acc;
}

/// Euclidean (L2) length of a vector.
double norm(const Vector& v) { return std::sqrt(dot(v, v)); }

/// Cosine of the angle between two vectors.
/// @throws std::invalid_argument when either vector is (near-)zero.
double cosine_similarity(const Vector& a, const Vector& b) {
    const double magnitude_product = norm(a) * norm(b);
    if (magnitude_product < 1e-12) {
        throw std::invalid_argument("Norm cannot be zero.");
    }
    return dot(a, b) / magnitude_product;
}

/// Intersection-over-union of two integer sets; defined as 1 when both
/// sets are empty.
double jaccard_similarity(const std::set<int>& a, const std::set<int>& b) {
    size_t shared = 0;
    for (const int element : a) {
        if (b.find(element) != b.end()) {
            ++shared;
        }
    }
    const size_t total = a.size() + b.size() - shared;
    if (total == 0) {
        return 1.0;
    }
    return static_cast<double>(shared) / static_cast<double>(total);
}

/// True when the dot product of the vectors is within @p eps of zero.
bool are_orthogonal(const Vector& a, const Vector& b, double eps = 1e-9) {
    return std::abs(dot(a, b)) < eps;
}

/// Orthogonal projection of @p a onto the line spanned by @p b.
/// @throws std::invalid_argument when @p b is (near-)zero.
Vector projection(const Vector& a, const Vector& b) {
    const double b_dot_b = dot(b, b);
    if (b_dot_b < 1e-12) {
        throw std::invalid_argument("Cannot project onto a zero vector.");
    }
    const double scale = dot(a, b) / b_dot_b;
    Vector result;
    result.reserve(b.size());
    for (const double component : b) {
        result.push_back(component * scale);
    }
    return result;
}
}  // namespace advance_maths::geometry

static void test() {
using namespace advance_maths::geometry;

Vector a = {1.0, 2.0, 3.0};
Vector b = {2.0, 4.0, 6.0};
assert(std::abs(cosine_similarity(a, b) - 1.0) < 1e-9);

std::set<int> s1 = {1, 2, 3, 5};
std::set<int> s2 = {2, 3, 4};
assert(std::abs(jaccard_similarity(s1, s2) - 0.4) < 1e-9);

Vector o1 = {1.0, 0.0};
Vector o2 = {0.0, 4.0};
assert(are_orthogonal(o1, o2));

Vector p = projection(a, b);
assert(std::abs(p[0] - 1.0) < 1e-9);
assert(std::abs(p[1] - 2.0) < 1e-9);
assert(std::abs(p[2] - 3.0) < 1e-9);
}

/// Entry point: runs the self-tests, then reports success on stdout.
int main() {
test();
std::cout << "Geometry module passed.\n";
return 0;
}
146 changes: 146 additions & 0 deletions advance_maths/linear_algebra.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
/**
* @file
* @brief Core linear algebra utilities and demonstrations.
*/
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

namespace advance_maths::linear_algebra {
using Matrix = std::vector<std::vector<double>>;
using Vector = std::vector<double>;

/// @throws std::invalid_argument when the two vectors differ in length.
static void validate_same_size(const Vector& a, const Vector& b) {
    if (a.size() != b.size()) {
        throw std::invalid_argument("Vectors must have the same size.");
    }
}

/// @throws std::invalid_argument when @p m is not exactly 2x2.
static void validate_2x2(const Matrix& m) {
    if (m.size() != 2 || m[0].size() != 2 || m[1].size() != 2) {
        throw std::invalid_argument("This demo supports only 2x2 matrices.");
    }
}

/// Inner product of two equally-sized vectors.
double dot_product(const Vector& a, const Vector& b) {
    validate_same_size(a, b);
    double sum = 0.0;
    for (size_t i = 0; i < a.size(); ++i) {
        sum += a[i] * b[i];
    }
    return sum;
}

/// Euclidean (L2) norm.
double l2_norm(const Vector& a) { return std::sqrt(dot_product(a, a)); }

/// Sum of absolute components (L1 norm).
double manhattan_norm(const Vector& a) {
    double sum = 0.0;
    for (double value : a) {
        sum += std::abs(value);
    }
    return sum;
}

/// Straight-line (L2) distance between two points.
double euclidean_distance(const Vector& a, const Vector& b) {
    validate_same_size(a, b);
    double sum = 0.0;
    for (size_t i = 0; i < a.size(); ++i) {
        sum += (a[i] - b[i]) * (a[i] - b[i]);
    }
    return std::sqrt(sum);
}

/// Taxicab (L1) distance between two points.
double manhattan_distance(const Vector& a, const Vector& b) {
    validate_same_size(a, b);
    double sum = 0.0;
    for (size_t i = 0; i < a.size(); ++i) {
        sum += std::abs(a[i] - b[i]);
    }
    return sum;
}

/// Matrix product a * b. Rows are assumed to be uniform in length.
/// @throws std::invalid_argument when the inner dimensions do not match.
Matrix multiply(const Matrix& a, const Matrix& b) {
    if (a.empty() || b.empty() || a[0].size() != b.size()) {
        throw std::invalid_argument("Incompatible matrix dimensions.");
    }

    Matrix result(a.size(), Vector(b[0].size(), 0.0));
    // i-k-j loop order walks b row-wise for better cache locality.
    for (size_t i = 0; i < a.size(); ++i) {
        for (size_t k = 0; k < b.size(); ++k) {
            for (size_t j = 0; j < b[0].size(); ++j) {
                result[i][j] += a[i][k] * b[k][j];
            }
        }
    }
    return result;
}

/// Doolittle LU decomposition of a 2x2 matrix (no pivoting).
/// @return {L, U} with L unit-lower-triangular and U upper-triangular.
/// @throws std::invalid_argument for non-2x2 input or a near-zero pivot.
std::pair<Matrix, Matrix> lu_decomposition_2x2(const Matrix& m) {
    validate_2x2(m);
    if (std::abs(m[0][0]) < 1e-12) {
        throw std::invalid_argument("Pivot too small for this simple LU decomposition.");
    }

    Matrix l = {{1.0, 0.0}, {m[1][0] / m[0][0], 1.0}};
    Matrix u = {{m[0][0], m[0][1]}, {0.0, m[1][1] - l[1][0] * m[0][1]}};
    return {l, u};
}

/// Eigenvalues of a 2x2 matrix via the characteristic polynomial
/// lambda^2 - trace * lambda + det = 0.
/// @return {larger root, smaller root}.
/// @throws std::invalid_argument for non-2x2 input, or when the
///         discriminant is negative (complex-conjugate eigenvalue pair,
///         which this real-valued helper cannot represent; previously this
///         silently produced NaN).
std::pair<double, double> eigenvalues_2x2(const Matrix& m) {
    validate_2x2(m);

    const double trace = m[0][0] + m[1][1];
    const double det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
    const double discriminant = trace * trace - 4.0 * det;
    if (discriminant < 0.0) {
        throw std::invalid_argument("Matrix has complex eigenvalues.");
    }
    const double root = std::sqrt(discriminant);
    return {(trace + root) / 2.0, (trace - root) / 2.0};
}

/// Unit eigenvector of M^T * M belonging to its largest eigenvalue, i.e.
/// the dominant right singular vector of M.
/// @throws std::invalid_argument for non-2x2 input (previously this indexed
///         out of bounds).
Vector dominant_right_singular_vector_2x2(const Matrix& m) {
    validate_2x2(m);

    // M^T * M is symmetric positive semi-definite, so its eigenvalues are
    // real and the discriminant in eigenvalues_2x2 is non-negative.
    Matrix mtm = {
        {m[0][0] * m[0][0] + m[1][0] * m[1][0], m[0][0] * m[0][1] + m[1][0] * m[1][1]},
        {m[0][0] * m[0][1] + m[1][0] * m[1][1], m[0][1] * m[0][1] + m[1][1] * m[1][1]}};

    auto eig = eigenvalues_2x2(mtm);
    double lambda = std::max(eig.first, eig.second);

    // (mtm - lambda*I) v = 0 gives v ~ (mtm[0][1], lambda - mtm[0][0]).
    Vector v = {mtm[0][1], lambda - mtm[0][0]};
    double norm = l2_norm(v);
    if (norm < 1e-12) {
        // mtm is (near-)diagonal with equal entries: any unit vector works.
        return {1.0, 0.0};
    }
    v[0] /= norm;
    v[1] /= norm;
    return v;
}
}  // namespace advance_maths::linear_algebra

static void test() {
using namespace advance_maths::linear_algebra;

Vector a = {1.0, 2.0, 3.0};
Vector b = {4.0, 1.0, -2.0};
assert(std::abs(dot_product(a, b) - 0.0) < 1e-9);
assert(std::abs(l2_norm(a) - std::sqrt(14.0)) < 1e-9);
assert(std::abs(manhattan_norm(a) - 6.0) < 1e-9);
assert(std::abs(euclidean_distance(a, b) - std::sqrt(35.0)) < 1e-9);
assert(std::abs(manhattan_distance(a, b) - 9.0) < 1e-9);

Matrix m = {{4.0, 3.0}, {4.0, 3.0}};
auto [l, u] = lu_decomposition_2x2(m);
Matrix reconstructed = multiply(l, u);
assert(std::abs(reconstructed[0][0] - 4.0) < 1e-9);
assert(std::abs(reconstructed[1][0] - 4.0) < 1e-9);

auto eig = eigenvalues_2x2(m);
assert(std::abs(eig.first - 7.0) < 1e-9 || std::abs(eig.second - 7.0) < 1e-9);

Vector sv = dominant_right_singular_vector_2x2({{1.0, 2.0}, {3.0, 4.0}});
assert(std::abs(l2_norm(sv) - 1.0) < 1e-9);
}

/// Entry point: runs the self-tests, then reports success on stdout.
int main() {
test();
std::cout << "Linear algebra module passed.\n";
return 0;
}
62 changes: 62 additions & 0 deletions advance_maths/optimization.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
/**
* @file
* @brief Optimization algorithms: gradient descent and SGD.
*/
#include <cassert>
#include <cmath>
#include <iostream>
#include <random>
#include <stdexcept>
#include <utility>
#include <vector>

namespace advance_maths::optimization {

/// Convex test objective f(x) = (x - 3)^2, minimized at x = 3.
double objective(double x) { return (x - 3.0) * (x - 3.0); }

/// Analytic gradient f'(x) = 2 * (x - 3).
double objective_grad(double x) { return 2.0 * (x - 3.0); }

/// Plain gradient descent on `objective`.
/// @param initial_x starting point.
/// @param learning_rate step size; the update factor is (1 - 2 * lr), so
///        lr >= 1.0 fails to converge for this objective.
/// @param iterations number of update steps.
/// @return the final iterate.
double gradient_descent(double initial_x, double learning_rate, int iterations) {
    double x = initial_x;
    for (int i = 0; i < iterations; ++i) {
        x -= learning_rate * objective_grad(x);
    }
    return x;
}

/// Stochastic gradient descent fitting y = w * x + b under MSE loss, one
/// randomly chosen sample per epoch. The fixed RNG seed keeps results
/// deterministic and testable.
/// @return {w, b} after @p epochs updates.
/// @throws std::invalid_argument when the datasets are empty or differ in
///         size (an empty `x` previously made the distribution bounds wrap
///         to (0, SIZE_MAX) -- undefined behavior).
std::pair<double, double> linear_sgd(const std::vector<double>& x, const std::vector<double>& y,
                                     double lr, int epochs) {
    if (x.empty() || x.size() != y.size()) {
        throw std::invalid_argument("x and y must be non-empty and equally sized.");
    }

    double w = 0.0;
    double b = 0.0;
    std::mt19937 gen(42);
    std::uniform_int_distribution<size_t> dist(0, x.size() - 1);

    for (int epoch = 0; epoch < epochs; ++epoch) {
        const size_t i = dist(gen);
        const double pred = w * x[i] + b;
        const double err = pred - y[i];
        w -= lr * 2.0 * err * x[i];  // dL/dw = 2 * err * x
        b -= lr * 2.0 * err;         // dL/db = 2 * err
    }
    return {w, b};
}
}  // namespace advance_maths::optimization

/// Self-tests for the optimizers; aborts via assert on failure.
static void test() {
    using namespace advance_maths::optimization;

    // The quadratic (x - 3)^2 has its minimum at x = 3.
    const double minimizer = gradient_descent(10.0, 0.1, 100);
    assert(std::abs(minimizer - 3.0) < 1e-4);
    assert(objective(minimizer) < 1e-6);

    // Recover w = 2, b = 1 from noiseless samples of y = 2x + 1.
    const std::vector<double> xs = {1, 2, 3, 4};
    const std::vector<double> ys = {3, 5, 7, 9};
    const auto params = linear_sgd(xs, ys, 0.01, 10000);
    assert(std::abs(params.first - 2.0) < 0.25);
    assert(std::abs(params.second - 1.0) < 0.5);
}

/// Entry point: runs the self-tests, then reports success on stdout.
int main() {
test();
std::cout << "Optimization module passed.\n";
return 0;
}
Loading