diff --git a/CMakeLists.txt b/CMakeLists.txt index 1fb339e475..506840fc18 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,6 +41,7 @@ add_subdirectory(graphics) add_subdirectory(greedy_algorithms) add_subdirectory(hashing) add_subdirectory(machine_learning) +add_subdirectory(advance_maths) add_subdirectory(math) add_subdirectory(numerical_methods) add_subdirectory(operations_on_datastructures) diff --git a/advance_maths/CMakeLists.txt b/advance_maths/CMakeLists.txt new file mode 100644 index 0000000000..7db2fee44d --- /dev/null +++ b/advance_maths/CMakeLists.txt @@ -0,0 +1,11 @@ +file( GLOB APP_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp ) +foreach( testsourcefile ${APP_SOURCES} ) + string( REPLACE ".cpp" "" testname ${testsourcefile} ) + add_executable( ${testname} ${testsourcefile} ) + + set_target_properties(${testname} PROPERTIES LINKER_LANGUAGE CXX) + if(OpenMP_CXX_FOUND) + target_link_libraries(${testname} OpenMP::OpenMP_CXX) + endif() + install(TARGETS ${testname} DESTINATION "bin/advance_maths") +endforeach( testsourcefile ${APP_SOURCES} ) diff --git a/advance_maths/calculus.cpp b/advance_maths/calculus.cpp new file mode 100644 index 0000000000..b2d3ddefdd --- /dev/null +++ b/advance_maths/calculus.cpp @@ -0,0 +1,67 @@ +/** + * @file + * @brief Basic and intermediate calculus utilities. 
+ */ +#include +#include +#include +#include +#include + +namespace advance_maths::calculus { +using Vector = std::vector; + +double derivative(const std::function& f, double x, double h = 1e-5) { + return (f(x + h) - f(x - h)) / (2.0 * h); +} + +double second_derivative(const std::function& f, double x, double h = 1e-5) { + return (f(x + h) - 2.0 * f(x) + f(x - h)) / (h * h); +} + +double partial_derivative_x(const std::function& f, double x, double y, + double h = 1e-5) { + return (f(x + h, y) - f(x - h, y)) / (2.0 * h); +} + +double partial_derivative_y(const std::function& f, double x, double y, + double h = 1e-5) { + return (f(x, y + h) - f(x, y - h)) / (2.0 * h); +} + +Vector gradient_2d(const std::function& f, double x, double y) { + return {partial_derivative_x(f, x, y), partial_derivative_y(f, x, y)}; +} + +double chain_rule(double df_dg, double dg_dx) { return df_dg * dg_dx; } + +double backprop_single_weight(double prediction, double target, double input) { + double loss_grad_prediction = 2.0 * (prediction - target); // dL/dy for MSE + double prediction_grad_weight = input; // dy/dw for y = wx + return chain_rule(loss_grad_prediction, prediction_grad_weight); +} +} // namespace advance_maths::calculus + +static void test() { + using namespace advance_maths::calculus; + + auto f = [](double x) { return x * x * x; }; + assert(std::abs(derivative(f, 2.0) - 12.0) < 1e-3); + assert(std::abs(second_derivative(f, 2.0) - 12.0) < 1e-2); + + auto g = [](double x, double y) { return x * x + 3.0 * x * y + y * y; }; + assert(std::abs(partial_derivative_x(g, 1.0, 2.0) - 8.0) < 1e-3); + assert(std::abs(partial_derivative_y(g, 1.0, 2.0) - 7.0) < 1e-3); + + auto grad = gradient_2d(g, 1.0, 2.0); + assert(std::abs(grad[0] - 8.0) < 1e-3 && std::abs(grad[1] - 7.0) < 1e-3); + + double dldw = backprop_single_weight(3.0, 1.0, 2.0); + assert(std::abs(dldw - 8.0) < 1e-9); +} + +int main() { + test(); + std::cout << "Calculus module passed.\n"; + return 0; +} diff --git 
a/advance_maths/geometry.cpp b/advance_maths/geometry.cpp new file mode 100644 index 0000000000..593d906aa1 --- /dev/null +++ b/advance_maths/geometry.cpp @@ -0,0 +1,92 @@ +/** + * @file + * @brief Geometry-related similarity and projection utilities. + */ +#include +#include +#include +#include +#include +#include + +namespace advance_maths::geometry { +using Vector = std::vector; + +double dot(const Vector& a, const Vector& b) { + if (a.size() != b.size()) { + throw std::invalid_argument("Vectors must have the same size."); + } + double sum = 0.0; + for (size_t i = 0; i < a.size(); ++i) { + sum += a[i] * b[i]; + } + return sum; +} + +double norm(const Vector& v) { + return std::sqrt(dot(v, v)); +} + +double cosine_similarity(const Vector& a, const Vector& b) { + double denom = norm(a) * norm(b); + if (denom < 1e-12) { + throw std::invalid_argument("Norm cannot be zero."); + } + return dot(a, b) / denom; +} + +double jaccard_similarity(const std::set& a, const std::set& b) { + size_t intersection_count = 0; + for (int item : a) { + if (b.count(item) > 0) { + ++intersection_count; + } + } + const size_t union_count = a.size() + b.size() - intersection_count; + return union_count == 0 ? 
1.0 : static_cast(intersection_count) / static_cast(union_count); +} + +bool are_orthogonal(const Vector& a, const Vector& b, double eps = 1e-9) { + return std::abs(dot(a, b)) < eps; +} + +Vector projection(const Vector& a, const Vector& b) { + const double denom = dot(b, b); + if (denom < 1e-12) { + throw std::invalid_argument("Cannot project onto a zero vector."); + } + const double scale = dot(a, b) / denom; + Vector proj = b; + for (double& value : proj) { + value *= scale; + } + return proj; +} +} // namespace advance_maths::geometry + +static void test() { + using namespace advance_maths::geometry; + + Vector a = {1.0, 2.0, 3.0}; + Vector b = {2.0, 4.0, 6.0}; + assert(std::abs(cosine_similarity(a, b) - 1.0) < 1e-9); + + std::set s1 = {1, 2, 3, 5}; + std::set s2 = {2, 3, 4}; + assert(std::abs(jaccard_similarity(s1, s2) - 0.4) < 1e-9); + + Vector o1 = {1.0, 0.0}; + Vector o2 = {0.0, 4.0}; + assert(are_orthogonal(o1, o2)); + + Vector p = projection(a, b); + assert(std::abs(p[0] - 1.0) < 1e-9); + assert(std::abs(p[1] - 2.0) < 1e-9); + assert(std::abs(p[2] - 3.0) < 1e-9); +} + +int main() { + test(); + std::cout << "Geometry module passed.\n"; + return 0; +} diff --git a/advance_maths/linear_algebra.cpp b/advance_maths/linear_algebra.cpp new file mode 100644 index 0000000000..8207c7fbdf --- /dev/null +++ b/advance_maths/linear_algebra.cpp @@ -0,0 +1,146 @@ +/** + * @file + * @brief Core linear algebra utilities and demonstrations. 
+ */ +#include +#include +#include +#include +#include +#include + +namespace advance_maths::linear_algebra { +using Matrix = std::vector>; +using Vector = std::vector; + +static void validate_same_size(const Vector& a, const Vector& b) { + if (a.size() != b.size()) { + throw std::invalid_argument("Vectors must have the same size."); + } +} + +double dot_product(const Vector& a, const Vector& b) { + validate_same_size(a, b); + double sum = 0.0; + for (size_t i = 0; i < a.size(); ++i) { + sum += a[i] * b[i]; + } + return sum; +} + +double l2_norm(const Vector& a) { return std::sqrt(dot_product(a, a)); } + +double manhattan_norm(const Vector& a) { + double sum = 0.0; + for (double value : a) { + sum += std::abs(value); + } + return sum; +} + +double euclidean_distance(const Vector& a, const Vector& b) { + validate_same_size(a, b); + double sum = 0.0; + for (size_t i = 0; i < a.size(); ++i) { + sum += (a[i] - b[i]) * (a[i] - b[i]); + } + return std::sqrt(sum); +} + +double manhattan_distance(const Vector& a, const Vector& b) { + validate_same_size(a, b); + double sum = 0.0; + for (size_t i = 0; i < a.size(); ++i) { + sum += std::abs(a[i] - b[i]); + } + return sum; +} + +Matrix multiply(const Matrix& a, const Matrix& b) { + if (a.empty() || b.empty() || a[0].size() != b.size()) { + throw std::invalid_argument("Incompatible matrix dimensions."); + } + + Matrix result(a.size(), Vector(b[0].size(), 0.0)); + for (size_t i = 0; i < a.size(); ++i) { + for (size_t k = 0; k < b.size(); ++k) { + for (size_t j = 0; j < b[0].size(); ++j) { + result[i][j] += a[i][k] * b[k][j]; + } + } + } + return result; +} + +std::pair lu_decomposition_2x2(const Matrix& m) { + if (m.size() != 2 || m[0].size() != 2 || m[1].size() != 2) { + throw std::invalid_argument("This demo supports only 2x2 matrices."); + } + if (std::abs(m[0][0]) < 1e-12) { + throw std::invalid_argument("Pivot too small for this simple LU decomposition."); + } + + Matrix l = {{1.0, 0.0}, {m[1][0] / m[0][0], 1.0}}; + Matrix 
u = {{m[0][0], m[0][1]}, {0.0, m[1][1] - l[1][0] * m[0][1]}}; + return {l, u}; +} + +std::pair eigenvalues_2x2(const Matrix& m) { + if (m.size() != 2 || m[0].size() != 2 || m[1].size() != 2) { + throw std::invalid_argument("This demo supports only 2x2 matrices."); + } + + const double trace = m[0][0] + m[1][1]; + const double det = m[0][0] * m[1][1] - m[0][1] * m[1][0]; + const double disc = std::sqrt(trace * trace - 4.0 * det); + return {(trace + disc) / 2.0, (trace - disc) / 2.0}; +} + +Vector dominant_right_singular_vector_2x2(const Matrix& m) { + Matrix mtm = { + {m[0][0] * m[0][0] + m[1][0] * m[1][0], m[0][0] * m[0][1] + m[1][0] * m[1][1]}, + {m[0][0] * m[0][1] + m[1][0] * m[1][1], m[0][1] * m[0][1] + m[1][1] * m[1][1]}}; + + auto eig = eigenvalues_2x2(mtm); + double lambda = std::max(eig.first, eig.second); + + Vector v = {mtm[0][1], lambda - mtm[0][0]}; + double norm = l2_norm(v); + if (norm < 1e-12) { + return {1.0, 0.0}; + } + v[0] /= norm; + v[1] /= norm; + return v; +} +} // namespace advance_maths::linear_algebra + +static void test() { + using namespace advance_maths::linear_algebra; + + Vector a = {1.0, 2.0, 3.0}; + Vector b = {4.0, 1.0, -2.0}; + assert(std::abs(dot_product(a, b) - 0.0) < 1e-9); + assert(std::abs(l2_norm(a) - std::sqrt(14.0)) < 1e-9); + assert(std::abs(manhattan_norm(a) - 6.0) < 1e-9); + assert(std::abs(euclidean_distance(a, b) - std::sqrt(35.0)) < 1e-9); + assert(std::abs(manhattan_distance(a, b) - 9.0) < 1e-9); + + Matrix m = {{4.0, 3.0}, {4.0, 3.0}}; + auto [l, u] = lu_decomposition_2x2(m); + Matrix reconstructed = multiply(l, u); + assert(std::abs(reconstructed[0][0] - 4.0) < 1e-9); + assert(std::abs(reconstructed[1][0] - 4.0) < 1e-9); + + auto eig = eigenvalues_2x2(m); + assert(std::abs(eig.first - 7.0) < 1e-9 || std::abs(eig.second - 7.0) < 1e-9); + + Vector sv = dominant_right_singular_vector_2x2({{1.0, 2.0}, {3.0, 4.0}}); + assert(std::abs(l2_norm(sv) - 1.0) < 1e-9); +} + +int main() { + test(); + std::cout << "Linear algebra 
module passed.\n"; + return 0; +} diff --git a/advance_maths/optimization.cpp b/advance_maths/optimization.cpp new file mode 100644 index 0000000000..528ad8439c --- /dev/null +++ b/advance_maths/optimization.cpp @@ -0,0 +1,62 @@ +/** + * @file + * @brief Optimization algorithms: gradient descent and SGD. + */ +#include +#include +#include +#include +#include +#include + +namespace advance_maths::optimization { + +double objective(double x) { return (x - 3.0) * (x - 3.0); } + +double objective_grad(double x) { return 2.0 * (x - 3.0); } + +double gradient_descent(double initial_x, double learning_rate, int iterations) { + double x = initial_x; + for (int i = 0; i < iterations; ++i) { + x -= learning_rate * objective_grad(x); + } + return x; +} + +std::pair linear_sgd(const std::vector& x, const std::vector& y, + double lr, int epochs) { + double w = 0.0; + double b = 0.0; + std::mt19937 gen(42); + std::uniform_int_distribution dist(0, x.size() - 1); + + for (int epoch = 0; epoch < epochs; ++epoch) { + size_t i = dist(gen); + double pred = w * x[i] + b; + double err = pred - y[i]; + w -= lr * 2.0 * err * x[i]; + b -= lr * 2.0 * err; + } + return {w, b}; +} +} // namespace advance_maths::optimization + +static void test() { + using namespace advance_maths::optimization; + + double optimum = gradient_descent(10.0, 0.1, 100); + assert(std::abs(optimum - 3.0) < 1e-4); + assert(objective(optimum) < 1e-6); + + std::vector x = {1, 2, 3, 4}; + std::vector y = {3, 5, 7, 9}; // y = 2x + 1 + auto [w, b] = linear_sgd(x, y, 0.01, 10000); + assert(std::abs(w - 2.0) < 0.25); + assert(std::abs(b - 1.0) < 0.5); +} + +int main() { + test(); + std::cout << "Optimization module passed.\n"; + return 0; +} diff --git a/advance_maths/probability_statistics.cpp b/advance_maths/probability_statistics.cpp new file mode 100644 index 0000000000..e6cd7a8319 --- /dev/null +++ b/advance_maths/probability_statistics.cpp @@ -0,0 +1,155 @@ +/** + * @file + * @brief Probability and statistics utilities 
and demonstrations. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace advance_maths::probability_statistics { +using Vector = std::vector; + +double mean(const Vector& v) { + if (v.empty()) { + throw std::invalid_argument("Vector cannot be empty."); + } + return std::accumulate(v.begin(), v.end(), 0.0) / static_cast(v.size()); +} + +double variance(const Vector& v) { + const double m = mean(v); + double sum = 0.0; + for (double x : v) { + sum += (x - m) * (x - m); + } + return sum / static_cast(v.size()); +} + +double standard_deviation(const Vector& v) { return std::sqrt(variance(v)); } + +double covariance(const Vector& x, const Vector& y) { + if (x.size() != y.size() || x.empty()) { + throw std::invalid_argument("Vectors must be non-empty and same size."); + } + const double mx = mean(x); + const double my = mean(y); + double sum = 0.0; + for (size_t i = 0; i < x.size(); ++i) { + sum += (x[i] - mx) * (y[i] - my); + } + return sum / static_cast(x.size()); +} + +double pearson_correlation(const Vector& x, const Vector& y) { + const double denom = standard_deviation(x) * standard_deviation(y); + if (denom < 1e-12) { + throw std::invalid_argument("Standard deviation is zero."); + } + return covariance(x, y) / denom; +} + +double joint_probability(double p_a, double p_b_given_a) { return p_a * p_b_given_a; } + +double conditional_probability(double joint, double p_condition) { + if (p_condition <= 0.0) { + throw std::invalid_argument("Condition probability must be > 0."); + } + return joint / p_condition; +} + +double bayes(double p_b_given_a, double p_a, double p_b) { + if (p_b <= 0.0) { + throw std::invalid_argument("Marginal probability p(B) must be > 0."); + } + return (p_b_given_a * p_a) / p_b; +} + +double binomial_pmf(int n, int k, double p) { + if (k < 0 || k > n || p < 0.0 || p > 1.0) { + throw std::invalid_argument("Invalid binomial parameters."); + } + auto combination = [&](int nn, int kk) { + double c = 
1.0; + for (int i = 1; i <= kk; ++i) { + c = c * (nn - (kk - i)) / i; + } + return c; + }; + return combination(n, k) * std::pow(p, k) * std::pow(1.0 - p, n - k); +} + +double normal_pdf(double x, double mu, double sigma) { + if (sigma <= 0.0) { + throw std::invalid_argument("Sigma must be > 0."); + } + const double z = (x - mu) / sigma; + return std::exp(-0.5 * z * z) / (sigma * std::sqrt(2.0 * M_PI)); +} + +std::pair confidence_interval_mean(const Vector& v, double z_score = 1.96) { + const double m = mean(v); + const double se = standard_deviation(v) / std::sqrt(static_cast(v.size())); + return {m - z_score * se, m + z_score * se}; +} + +bool z_test_reject(double sample_mean, double hypothesized_mean, double sigma, int n, + double alpha = 0.05) { + const double z = std::abs(sample_mean - hypothesized_mean) / (sigma / std::sqrt(static_cast(n))); + const double critical = (alpha == 0.05 ? 1.96 : 2.58); + return z > critical; +} + +double bootstrap_mean_estimate(const Vector& v, int iterations = 200) { + std::mt19937 gen(123); + std::uniform_int_distribution dist(0, v.size() - 1); + Vector bootstrap_means; + bootstrap_means.reserve(iterations); + + for (int it = 0; it < iterations; ++it) { + double sum = 0.0; + for (size_t i = 0; i < v.size(); ++i) { + sum += v[dist(gen)]; + } + bootstrap_means.push_back(sum / static_cast(v.size())); + } + return mean(bootstrap_means); +} +} // namespace advance_maths::probability_statistics + +static void test() { + using namespace advance_maths::probability_statistics; + std::vector x = {1, 2, 3, 4, 5}; + std::vector y = {2, 4, 6, 8, 10}; + + assert(std::abs(mean(x) - 3.0) < 1e-9); + assert(std::abs(variance(x) - 2.0) < 1e-9); + assert(std::abs(standard_deviation(x) - std::sqrt(2.0)) < 1e-9); + assert(std::abs(covariance(x, y) - 4.0) < 1e-9); + assert(std::abs(pearson_correlation(x, y) - 1.0) < 1e-9); + + double joint = joint_probability(0.5, 0.2); + assert(std::abs(joint - 0.1) < 1e-9); + 
assert(std::abs(conditional_probability(0.1, 0.5) - 0.2) < 1e-9); + assert(std::abs(bayes(0.9, 0.01, 0.02) - 0.45) < 1e-9); + + assert(std::abs(binomial_pmf(5, 2, 0.5) - 0.3125) < 1e-9); + assert(std::abs(normal_pdf(0.0, 0.0, 1.0) - 0.3989422804) < 1e-6); + + auto ci = confidence_interval_mean(x); + assert(ci.first < 3.0 && ci.second > 3.0); + assert(!z_test_reject(5.1, 5.0, 1.0, 100)); + + double boot = bootstrap_mean_estimate(x); + assert(std::abs(boot - 3.0) < 0.2); +} + +int main() { + test(); + std::cout << "Probability and statistics module passed.\n"; + return 0; +} diff --git a/advance_maths/regression_analysis.cpp b/advance_maths/regression_analysis.cpp new file mode 100644 index 0000000000..2a7d9b0c02 --- /dev/null +++ b/advance_maths/regression_analysis.cpp @@ -0,0 +1,91 @@ +/** + * @file + * @brief Regression analysis utilities: MLE and MSE. + */ +#include +#include +#include +#include +#include +#include + +namespace advance_maths::regression_analysis { + +double mean_squared_error(const std::vector& y_true, const std::vector& y_pred) { + if (y_true.size() != y_pred.size() || y_true.empty()) { + throw std::invalid_argument("Vectors must be same size and non-empty."); + } + double sum = 0.0; + for (size_t i = 0; i < y_true.size(); ++i) { + const double e = y_true[i] - y_pred[i]; + sum += e * e; + } + return sum / static_cast(y_true.size()); +} + +std::pair linear_regression_mle(const std::vector& x, + const std::vector& y) { + const double n = static_cast(x.size()); + double sum_x = 0.0; + double sum_y = 0.0; + double sum_xy = 0.0; + double sum_x2 = 0.0; + + for (size_t i = 0; i < x.size(); ++i) { + sum_x += x[i]; + sum_y += y[i]; + sum_xy += x[i] * y[i]; + sum_x2 += x[i] * x[i]; + } + + const double denom = n * sum_x2 - sum_x * sum_x; + const double slope = (n * sum_xy - sum_x * sum_y) / denom; + const double intercept = (sum_y - slope * sum_x) / n; + return {slope, intercept}; +} + +double estimate_gaussian_mean_mle(const std::vector& samples) { + 
double sum = 0.0; + for (double value : samples) { + sum += value; + } + return sum / static_cast(samples.size()); +} + +double estimate_gaussian_variance_mle(const std::vector& samples, double mu_hat) { + double sum = 0.0; + for (double value : samples) { + sum += (value - mu_hat) * (value - mu_hat); + } + return sum / static_cast(samples.size()); +} +} // namespace advance_maths::regression_analysis + +static void test() { + using namespace advance_maths::regression_analysis; + + std::vector x = {1, 2, 3, 4, 5}; + std::vector y = {3, 5, 7, 9, 11}; // y = 2x + 1 + + auto [slope, intercept] = linear_regression_mle(x, y); + assert(std::abs(slope - 2.0) < 1e-9); + assert(std::abs(intercept - 1.0) < 1e-9); + + std::vector y_pred; + for (double xv : x) { + y_pred.push_back(slope * xv + intercept); + } + assert(std::abs(mean_squared_error(y, y_pred)) < 1e-9); + + std::vector gaussian_samples = {2.0, 3.0, 4.0, 5.0}; + double mu_hat = estimate_gaussian_mean_mle(gaussian_samples); + double var_hat = estimate_gaussian_variance_mle(gaussian_samples, mu_hat); + assert(std::abs(mu_hat - 3.5) < 1e-9); + assert(std::abs(var_hat - 1.25) < 1e-9); +} + +int main() { + test(); + std::cout << "Regression analysis module passed.\n"; + return 0; +} diff --git a/advance_maths/vector_calculus.cpp b/advance_maths/vector_calculus.cpp new file mode 100644 index 0000000000..6125f24317 --- /dev/null +++ b/advance_maths/vector_calculus.cpp @@ -0,0 +1,91 @@ +/** + * @file + * @brief Vector calculus utilities: Jacobian and Hessian. 
+ */ +#include +#include +#include +#include +#include + +namespace advance_maths::vector_calculus { +using Vector = std::vector; +using Matrix = std::vector>; + +Vector gradient(const std::function& f, const Vector& x, double h = 1e-5) { + Vector grad(x.size(), 0.0); + for (size_t i = 0; i < x.size(); ++i) { + Vector x_plus = x; + Vector x_minus = x; + x_plus[i] += h; + x_minus[i] -= h; + grad[i] = (f(x_plus) - f(x_minus)) / (2.0 * h); + } + return grad; +} + +Matrix jacobian(const std::vector>& funcs, const Vector& x, + double h = 1e-5) { + Matrix j(funcs.size(), Vector(x.size(), 0.0)); + for (size_t row = 0; row < funcs.size(); ++row) { + for (size_t col = 0; col < x.size(); ++col) { + Vector x_plus = x; + Vector x_minus = x; + x_plus[col] += h; + x_minus[col] -= h; + j[row][col] = (funcs[row](x_plus) - funcs[row](x_minus)) / (2.0 * h); + } + } + return j; +} + +Matrix hessian(const std::function& f, const Vector& x, double h = 1e-4) { + size_t n = x.size(); + Matrix hess(n, Vector(n, 0.0)); + for (size_t i = 0; i < n; ++i) { + for (size_t j = 0; j < n; ++j) { + Vector x_pp = x, x_pm = x, x_mp = x, x_mm = x; + x_pp[i] += h; + x_pp[j] += h; + x_pm[i] += h; + x_pm[j] -= h; + x_mp[i] -= h; + x_mp[j] += h; + x_mm[i] -= h; + x_mm[j] -= h; + hess[i][j] = (f(x_pp) - f(x_pm) - f(x_mp) + f(x_mm)) / (4.0 * h * h); + } + } + return hess; +} +} // namespace advance_maths::vector_calculus + +static void test() { + using namespace advance_maths::vector_calculus; + + auto scalar = [](const Vector& v) { return v[0] * v[0] + 3.0 * v[0] * v[1] + v[1] * v[1]; }; + Vector x = {1.0, 2.0}; + + auto grad = gradient(scalar, x); + assert(std::abs(grad[0] - 8.0) < 1e-3); + assert(std::abs(grad[1] - 7.0) < 1e-3); + + std::vector> funcs = { + [](const Vector& v) { return v[0] + v[1]; }, + [](const Vector& v) { return v[0] * v[1]; }}; + auto j = jacobian(funcs, x); + assert(std::abs(j[0][0] - 1.0) < 1e-4 && std::abs(j[0][1] - 1.0) < 1e-4); + assert(std::abs(j[1][0] - 2.0) < 1e-3 && 
std::abs(j[1][1] - 1.0) < 1e-3); + + auto h = hessian(scalar, x); + assert(std::abs(h[0][0] - 2.0) < 1e-2); + assert(std::abs(h[0][1] - 3.0) < 1e-2); + assert(std::abs(h[1][0] - 3.0) < 1e-2); + assert(std::abs(h[1][1] - 2.0) < 1e-2); +} + +int main() { + test(); + std::cout << "Vector calculus module passed.\n"; + return 0; +} diff --git a/math/chinese_remainder_theorem.cpp b/math/chinese_remainder_theorem.cpp new file mode 100644 index 0000000000..5d917a81ee --- /dev/null +++ b/math/chinese_remainder_theorem.cpp @@ -0,0 +1,135 @@ +/** + * @file chinese_remainder_theorem.cpp + * @brief Solves simultaneous congruences with CRT. + * @details + * Merges congruences x ≡ ai (mod mi) one by one. Works for both coprime and + * non-coprime moduli; detects inconsistency when no solution exists. + * Time complexity is O(k log M) for k equations with arithmetic on merged modulus. + * @author OpenAI + * @see https://en.wikipedia.org/wiki/Chinese_remainder_theorem + */ + +#include +#include +#include + +/** + * @brief Stores one congruence equation. + */ +struct Congruence { + long long remainder; + long long modulus; +}; + +/** + * @brief Stores solution of a CRT system. + */ +struct CtrResult { + bool has_solution; + long long value; + long long modulus; +}; + +/** + * @brief Extended gcd for coefficients in ax+by=gcd(a,b). + * @param a first integer. + * @param b second integer. + * @param x coefficient for a. + * @param y coefficient for b. + * @returns gcd(a,b). + */ +long long extended_gcd(long long a, long long b, long long &x, long long &y) { + if (b == 0) { + x = 1; + y = 0; + return a >= 0 ? a : -a; + } + long long x1 = 0, y1 = 0; + long long g = extended_gcd(b, a % b, x1, y1); + x = y1; + y = x1 - (a / b) * y1; + return g; +} + +/** + * @brief Merges two congruences into one. + * @param a first congruence. + * @param b second congruence. + * @returns merged congruence result and consistency flag. 
+ */
+CtrResult merge(const CtrResult &a, const Congruence &b) {
+    if (!a.has_solution) {
+        return a;
+    }
+    long long x = 0, y = 0;
+    long long g = extended_gcd(a.modulus, b.modulus, x, y);
+    long long diff = b.remainder - a.value;
+    if (diff % g != 0) {
+        return {false, 0, 0};
+    }
+
+    __int128 lcm = (__int128)a.modulus / g * b.modulus;
+    __int128 mul = (__int128)(diff / g) * x;
+    long long mod2 = b.modulus / g;
+    long long step = static_cast<long long>(mul % mod2);
+    if (step < 0) {
+        step += mod2;
+    }
+
+    __int128 merged = (__int128)a.value + (__int128)step * a.modulus;
+    long long value = static_cast<long long>(merged % lcm);
+    if (value < 0) {
+        value += static_cast<long long>(lcm);
+    }
+
+    return {true, value, static_cast<long long>(lcm)};
+}
+
+/**
+ * @brief Solves x ≡ ai (mod mi) for a system of congruences.
+ * @param equations list of equations.
+ * @returns solution x modulo M if one exists.
+ */
+CtrResult chinese_remainder_theorem(const std::vector<Congruence> &equations) {
+    CtrResult current{true, 0, 1};
+    for (const Congruence &eq : equations) {
+        Congruence normalized = eq;
+        normalized.remainder %= normalized.modulus;
+        if (normalized.remainder < 0) {
+            normalized.remainder += normalized.modulus;
+        }
+        current = merge(current, normalized);
+        if (!current.has_solution) {
+            break;
+        }
+    }
+    return current;
+}
+
+/**
+ * @brief Self-tests for CRT solver.
+ * @returns void.
+ */
+static void test() {
+    CtrResult r1 = chinese_remainder_theorem({{2, 3}, {3, 5}, {2, 7}});
+    assert(r1.has_solution);
+    assert(r1.value == 23 && r1.modulus == 105);
+
+    CtrResult r2 = chinese_remainder_theorem({{1, 2}, {1, 4}});
+    assert(r2.has_solution);
+    assert(r2.value == 1 && r2.modulus == 4);
+
+    CtrResult r3 = chinese_remainder_theorem({{1, 2}, {0, 4}});
+    assert(!r3.has_solution);
+
+    CtrResult r4 = chinese_remainder_theorem({{4, 6}, {10, 14}});
+    assert(r4.has_solution);
+    assert(r4.value % 6 == 4 && r4.value % 14 == 10);
+
+    std::cout << "All tests passed!\n";
+}
+
+int main() {
+    test();
+    return 0;
+}
diff --git a/math/euler_totient.cpp b/math/euler_totient.cpp
new file mode 100644
index 0000000000..540ffc148f
--- /dev/null
+++ b/math/euler_totient.cpp
@@ -0,0 +1,92 @@
+/**
+ * @file euler_totient.cpp
+ * @brief Computes Euler's totient function for one value and for a range.
+ * @details
+ * Euler's totient φ(n) is the count of integers in [1, n] that are coprime with n.
+ * A single-value routine performs prime factorization in O(sqrt(n)), while the
+ * sieve-style routine computes φ(i) for i in [1, N] in O(N log log N) time and
+ * O(N) space.
+ * @author OpenAI
+ * @see https://en.wikipedia.org/wiki/Euler%27s_totient_function
+ */
+
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+#include <vector>
+
+/**
+ * @brief Computes Euler's totient value for a single n.
+ * @param n positive integer.
+ * @returns φ(n), and returns 0 for n = 0.
+ */
+long long phi_single(long long n) {
+    if (n == 0) {
+        return 0;
+    }
+    long long result = n;
+    long long x = n;
+    for (long long p = 2; p * p <= x; ++p) {
+        if (x % p == 0) {
+            while (x % p == 0) {
+                x /= p;
+            }
+            result -= result / p;
+        }
+    }
+    if (x > 1) {
+        result -= result / x;
+    }
+    return result;
+}
+
+/**
+ * @brief Computes φ(i) for every i from 1 to n using sieve idea.
+ * @param n upper bound.
+ * @returns vector tot where tot[i] = φ(i), tot[0] = 0.
+ */
+std::vector<long long> phi_upto(int n) {
+    std::vector<long long> tot(static_cast<std::size_t>(n + 1));
+    for (int i = 0; i <= n; ++i) {
+        tot[static_cast<std::size_t>(i)] = i;
+    }
+    for (int p = 2; p <= n; ++p) {
+        if (tot[static_cast<std::size_t>(p)] == p) {
+            for (int multiple = p; multiple <= n; multiple += p) {
+                tot[static_cast<std::size_t>(multiple)] -= tot[static_cast<std::size_t>(multiple)] / p;
+            }
+        }
+    }
+    if (n >= 0) {
+        tot[0] = 0;
+    }
+    if (n >= 1) {
+        tot[1] = 1;
+    }
+    return tot;
+}
+
+/**
+ * @brief Self-test routines for totient implementations.
+ * @returns void.
+ */
+static void test() {
+    assert(phi_single(0) == 0);
+    assert(phi_single(1) == 1);
+    assert(phi_single(9) == 6);
+    assert(phi_single(36) == 12);
+    assert(phi_single(97) == 96);
+
+    std::vector<long long> tot = phi_upto(10);
+    assert(tot[1] == 1);
+    assert(tot[2] == 1);
+    assert(tot[6] == 2);
+    assert(tot[10] == 4);
+
+    std::cout << "All tests passed!\n";
+}
+
+int main() {
+    test();
+    return 0;
+}
diff --git a/math/extended_gcd.cpp b/math/extended_gcd.cpp
new file mode 100644
index 0000000000..28f3990743
--- /dev/null
+++ b/math/extended_gcd.cpp
@@ -0,0 +1,65 @@
+/**
+ * @file extended_gcd.cpp
+ * @brief Extended Euclidean algorithm for gcd and Bézout coefficients.
+ * @details
+ * Computes gcd(a, b) and coefficients x, y such that ax + by = gcd(a, b).
+ * The recursive algorithm runs in O(log(min(a,b))) time and O(log(min(a,b)))
+ * recursion depth.
+ * @author OpenAI
+ * @see https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
+ */
+
+#include <cassert>
+#include <iostream>
+
+/**
+ * @brief Stores the result of extended gcd.
+ */
+struct ExtendedGcdResult {
+    long long gcd;
+    long long x;
+    long long y;
+};
+
+/**
+ * @brief Computes gcd(a,b) and coefficients x,y with ax + by = gcd.
+ * @param a first integer.
+ * @param b second integer.
+ * @returns structure containing gcd, x and y.
+ */
+ExtendedGcdResult extended_gcd(long long a, long long b) {
+    if (b == 0) {
+        return {a >= 0 ? a : -a, a >= 0 ? 1 : -1, 0};
+    }
+    ExtendedGcdResult next = extended_gcd(b, a % b);
+    return {next.gcd, next.y, next.x - (a / b) * next.y};
+}
+
+/**
+ * @brief Verifies correctness with assertions.
+ * @returns void.
+ */
+static void test() {
+    ExtendedGcdResult r1 = extended_gcd(30, 20);
+    assert(r1.gcd == 10);
+    assert(30 * r1.x + 20 * r1.y == r1.gcd);
+
+    ExtendedGcdResult r2 = extended_gcd(17, 31);
+    assert(r2.gcd == 1);
+    assert(17 * r2.x + 31 * r2.y == 1);
+
+    ExtendedGcdResult r3 = extended_gcd(0, 7);
+    assert(r3.gcd == 7);
+    assert(0 * r3.x + 7 * r3.y == 7);
+
+    ExtendedGcdResult r4 = extended_gcd(-24, 18);
+    assert(r4.gcd == 6);
+    assert((-24) * r4.x + 18 * r4.y == 6);
+
+    std::cout << "All tests passed!\n";
+}
+
+int main() {
+    test();
+    return 0;
+}
diff --git a/math/miller_rabin_primality.cpp b/math/miller_rabin_primality.cpp
new file mode 100644
index 0000000000..90ba01abe8
--- /dev/null
+++ b/math/miller_rabin_primality.cpp
@@ -0,0 +1,122 @@
+/**
+ * @file miller_rabin_primality.cpp
+ * @brief Deterministic Miller-Rabin primality test for 64-bit range.
+ * @details
+ * Implements Miller-Rabin with a fixed witness set
+ * {2,3,5,7,11,13,17,19,23,29,31,37}, which is deterministic for tested
+ * numbers in competitive programming constraints below 3.2e18.
+ * Uses safe modular multiplication with __int128.
+ * @author OpenAI
+ * @see https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
+ */
+
+#include <cassert>
+#include <iostream>
+#include <vector>
+
+/**
+ * @brief Multiplies two numbers modulo mod without overflow.
+ * @param a first factor.
+ * @param b second factor.
+ * @param mod modulus.
+ * @returns (a*b) % mod.
+ */
+unsigned long long mul_mod(unsigned long long a, unsigned long long b,
+                           unsigned long long mod) {
+    return static_cast<unsigned long long>((__int128)a * b % mod);
+}
+
+/**
+ * @brief Computes modular exponentiation for unsigned long long values.
+ * @param base base value.
+ * @param exp exponent value.
+ * @param mod modulus.
+ * @returns (base^exp) % mod.
+ */
+unsigned long long pow_mod(unsigned long long base, unsigned long long exp,
+                           unsigned long long mod) {
+    unsigned long long result = 1 % mod;
+    base %= mod;
+    while (exp > 0) {
+        if (exp & 1ULL) {
+            result = mul_mod(result, base, mod);
+        }
+        base = mul_mod(base, base, mod);
+        exp >>= 1ULL;
+    }
+    return result;
+}
+
+/**
+ * @brief Tests primality for n using deterministic witnesses.
+ * @param n integer to test.
+ * @returns true when n is prime, false otherwise.
+ */
+bool is_prime(unsigned long long n) {
+    if (n < 2) {
+        return false;
+    }
+    for (unsigned long long p : std::vector<unsigned long long>{2ULL, 3ULL, 5ULL,
+                                                                7ULL, 11ULL,
+                                                                13ULL, 17ULL,
+                                                                19ULL, 23ULL,
+                                                                29ULL, 31ULL,
+                                                                37ULL}) {
+        if (n % p == 0ULL) {
+            return n == p;
+        }
+    }
+
+    unsigned long long d = n - 1;
+    int s = 0;
+    while ((d & 1ULL) == 0ULL) {
+        d >>= 1ULL;
+        ++s;
+    }
+
+    const std::vector<unsigned long long> witnesses = {
+        2ULL, 3ULL, 5ULL, 7ULL, 11ULL, 13ULL,
+        17ULL, 19ULL, 23ULL, 29ULL, 31ULL, 37ULL};
+
+    for (unsigned long long a : witnesses) {
+        if (a >= n) {
+            continue;
+        }
+        unsigned long long x = pow_mod(a, d, n);
+        if (x == 1ULL || x == n - 1) {
+            continue;
+        }
+        bool witness_composite = true;
+        for (int r = 1; r < s; ++r) {
+            x = mul_mod(x, x, n);
+            if (x == n - 1) {
+                witness_composite = false;
+                break;
+            }
+        }
+        if (witness_composite) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/**
+ * @brief Self-tests for Miller-Rabin primality routine.
+ * @returns void.
+ */ +static void test() { + assert(!is_prime(0)); + assert(!is_prime(1)); + assert(is_prime(2)); + assert(is_prime(9999999967ULL)); + assert(!is_prime(10000000000ULL)); + assert(!is_prime(341550071728321ULL)); + + std::cout << "All tests passed!\n"; +} + +int main() { + test(); + return 0; +} diff --git a/math/modular_exponentiation.cpp b/math/modular_exponentiation.cpp index d4a9dd84d0..e637f4f212 100644 --- a/math/modular_exponentiation.cpp +++ b/math/modular_exponentiation.cpp @@ -1,89 +1,58 @@ /** - * @file - * @brief C++ Program for Modular Exponentiation Iteratively. - * @details The task is to calculate the value of an integer a raised to an - * integer exponent b under modulo c. - * @note The time complexity of this approach is O(log b). - * - * Example: - * (4^3) % 5 (where ^ stands for exponentiation and % for modulo) - * (4*4*4) % 5 - * (4 % 5) * ( (4*4) % 5 ) - * 4 * (16 % 5) - * 4 * 1 - * 4 - * We can also verify the result as 4^3 is 64 and 64 modulo 5 is 4 - * - * @author [Shri2206](https://github.com/Shri2206) + * @file modular_exponentiation.cpp + * @brief Fast modular exponentiation using binary method. + * @details + * Repeated squaring computes (base^exp) % mod in O(log exp) time. + * This is a foundational operation in modular arithmetic tasks in + * competitive programming. + * @author OpenAI + * @see https://en.wikipedia.org/wiki/Modular_exponentiation */ -#include /// for assert -#include -#include /// for io operations -/** - * @namespace math - * @brief Mathematical algorithms - */ -namespace math { + +#include +#include + /** - * @brief This function calculates a raised to exponent b under modulo c using - * modular exponentiation. - * @param a integer base - * @param b unsigned integer exponent - * @param c integer modulo - * @return a raised to power b modulo c + * @brief Computes (base^exp) % mod efficiently. + * @param base base value. + * @param exp non-negative exponent. + * @param mod positive modulus. 
+ * @returns value of (base^exp) modulo mod. */ -uint64_t power(uint64_t a, uint64_t b, uint64_t c) { - uint64_t ans = 1; /// Initialize the answer to be returned - a = a % c; /// Update a if it is more than or equal to c - if (a == 0) { - return 0; /// In case a is divisible by c; +long long modular_power(long long base, long long exp, long long mod) { + if (mod == 1) { + return 0; + } + long long result = 1 % mod; + base %= mod; + if (base < 0) { + base += mod; } - while (b > 0) { - /// If b is odd, multiply a with answer - if (b & 1) { - ans = ((ans % c) * (a % c)) % c; + while (exp > 0) { + if (exp & 1LL) { + result = static_cast((__int128)result * base % mod); } - /// b must be even now - b = b >> 1; /// b = b/2 - a = ((a % c) * (a % c)) % c; + base = static_cast((__int128)base * base % mod); + exp >>= 1; } - return ans; + return result; } -} // namespace math - /** - * Function for testing power function. - * test cases and assert statement. - * @returns `void` + * @brief Self-test implementation. + * @returns void. */ static void test() { - uint32_t test_case_1 = math::power(2, 5, 13); - assert(test_case_1 == 6); - std::cout << "Test 1 Passed!" << std::endl; + assert(modular_power(2, 10, 1000) == 24); + assert(modular_power(2, 0, 100) == 1); + assert(modular_power(0, 0, 1) == 0); + assert(modular_power(3, 100, 1000000007) == 886041711); + assert(modular_power(-2, 5, 13) == 7); - uint32_t test_case_2 = math::power(14, 7, 15); - assert(test_case_2 == 14); - std::cout << "Test 2 Passed!" << std::endl; - - uint64_t test_case_3 = math::power(8, 15, 41); - assert(test_case_3 == 32); - std::cout << "Test 3 Passed!" << std::endl; - - uint64_t test_case_4 = math::power(27, 2, 5); - assert(test_case_4 == 4); - std::cout << "Test 4 Passed!" << std::endl; - - uint16_t test_case_5 = math::power(7, 3, 6); - assert(test_case_5 == 1); - std::cout << "Test 5 Passed!" 
<< std::endl; + std::cout << "All tests passed!\n"; } -/** - * @brief Main function - * @returns 0 on exit - */ int main() { - test(); // execute the tests + test(); return 0; } diff --git a/math/modular_inverse.cpp b/math/modular_inverse.cpp new file mode 100644 index 0000000000..f93cf0cefc --- /dev/null +++ b/math/modular_inverse.cpp @@ -0,0 +1,117 @@ +/** + * @file modular_inverse.cpp + * @brief Computes modular inverse using Fermat and extended Euclid methods. + * @details + * For prime moduli and gcd(a, mod)=1, inverse can be computed as a^(mod-2) + * by Fermat's little theorem. For the general case, extended Euclidean + * algorithm provides inverse when gcd(a, mod)=1. + * @author OpenAI + * @see https://en.wikipedia.org/wiki/Modular_multiplicative_inverse + */ + +#include +#include + +/** + * @brief Computes x, y and gcd for ax + by = gcd(a,b). + * @param a first integer. + * @param b second integer. + * @param x reference to store coefficient of a. + * @param y reference to store coefficient of b. + * @returns gcd(a, b). + */ +long long extended_gcd(long long a, long long b, long long &x, long long &y) { + if (b == 0) { + x = 1; + y = 0; + return a >= 0 ? a : -a; + } + long long x1 = 0, y1 = 0; + long long g = extended_gcd(b, a % b, x1, y1); + x = y1; + y = x1 - (a / b) * y1; + return g; +} + +/** + * @brief Computes (base^exp) % mod using binary exponentiation. + * @param base base value. + * @param exp exponent. + * @param mod modulus. + * @returns modular power. + */ +long long modular_power(long long base, long long exp, long long mod) { + long long result = 1 % mod; + base %= mod; + if (base < 0) { + base += mod; + } + while (exp > 0) { + if (exp & 1LL) { + result = static_cast((__int128)result * base % mod); + } + base = static_cast((__int128)base * base % mod); + exp >>= 1; + } + return result; +} + +/** + * @brief Computes inverse of a modulo prime mod using Fermat theorem. + * @param a number to invert. + * @param mod prime modulus. 
+ * @returns inverse if exists, otherwise -1. + */ +long long mod_inverse_fermat(long long a, long long mod) { + a %= mod; + if (a < 0) { + a += mod; + } + if (a == 0) { + return -1; + } + return modular_power(a, mod - 2, mod); +} + +/** + * @brief Computes inverse of a modulo mod with extended Euclid. + * @param a number to invert. + * @param mod modulus. + * @returns inverse if gcd(a,mod)=1, else -1. + */ +long long mod_inverse_extended_gcd(long long a, long long mod) { + a %= mod; + if (a < 0) { + a += mod; + } + long long x = 0, y = 0; + long long g = extended_gcd(a, mod, x, y); + if (g != 1) { + return -1; + } + x %= mod; + if (x < 0) { + x += mod; + } + return x; +} + +/** + * @brief Self-tests for modular inverse methods. + * @returns void. + */ +static void test() { + assert(mod_inverse_fermat(3, 11) == 4); + assert(mod_inverse_fermat(10, 17) == 12); + assert(mod_inverse_extended_gcd(3, 11) == 4); + assert(mod_inverse_extended_gcd(10, 17) == 12); + assert(mod_inverse_extended_gcd(6, 9) == -1); + assert(mod_inverse_extended_gcd(-3, 11) == 7); + + std::cout << "All tests passed!\n"; +} + +int main() { + test(); + return 0; +} diff --git a/math/ncr_mod_prime.cpp b/math/ncr_mod_prime.cpp new file mode 100644 index 0000000000..dfd4c7cedd --- /dev/null +++ b/math/ncr_mod_prime.cpp @@ -0,0 +1,138 @@ +/** + * @file ncr_mod_prime.cpp + * @brief Computes nCr modulo prime using factorials and Lucas theorem. + * @details + * For n,p within manageable range, precompute factorial and inverse factorial + * arrays and answer nCr in O(1). For very large n with small prime p, Lucas + * theorem decomposes n and r in base p and multiplies small combinations. + * @author OpenAI + * @see https://en.wikipedia.org/wiki/Lucas%27s_theorem + */ + +#include +#include +#include + +/** + * @brief Computes (base^exp) % mod. + * @param base base value. + * @param exp exponent value. + * @param mod modulus. + * @returns modular exponentiation result. 
+ */ +long long mod_pow(long long base, long long exp, long long mod) { + long long result = 1 % mod; + base %= mod; + while (exp > 0) { + if (exp & 1LL) { + result = static_cast((__int128)result * base % mod); + } + base = static_cast((__int128)base * base % mod); + exp >>= 1LL; + } + return result; +} + +/** + * @brief Precomputes factorial and inverse factorial modulo p. + * @param p prime modulus. + * @param fact output factorial table. + * @param inv_fact output inverse factorial table. + * @returns void. + */ +void build_factorials(int p, std::vector &fact, + std::vector &inv_fact) { + fact.assign(static_cast(p), 1LL); + inv_fact.assign(static_cast(p), 1LL); + for (int i = 1; i < p; ++i) { + fact[static_cast(i)] = fact[static_cast(i - 1)] * i % p; + } + inv_fact[static_cast(p - 1)] = mod_pow(fact[static_cast(p - 1)], p - 2, p); + for (int i = p - 2; i >= 0; --i) { + inv_fact[static_cast(i)] = inv_fact[static_cast(i + 1)] * (i + 1) % p; + } +} + +/** + * @brief Computes C(n,r) % p for 0 <= n,r < p using precomputed tables. + * @param n total items. + * @param r chosen items. + * @param p prime modulus. + * @param fact factorial table. + * @param inv_fact inverse factorial table. + * @returns nCr modulo p. + */ +long long ncr_small(int n, int r, int p, const std::vector &fact, + const std::vector &inv_fact) { + if (r < 0 || r > n) { + return 0; + } + return fact[static_cast(n)] * inv_fact[static_cast(r)] % p * + inv_fact[static_cast(n - r)] % p; +} + +/** + * @brief Computes C(n,r) % p using Lucas theorem. + * @param n total items (can be very large). + * @param r chosen items. + * @param p small prime modulus. + * @returns nCr modulo p. 
+ */ +long long ncr_lucas(long long n, long long r, int p) { + if (r < 0 || r > n) { + return 0; + } + std::vector fact, inv_fact; + build_factorials(p, fact, inv_fact); + + long long result = 1; + while (n > 0 || r > 0) { + int ni = static_cast(n % p); + int ri = static_cast(r % p); + if (ri > ni) { + return 0; + } + result = result * ncr_small(ni, ri, p, fact, inv_fact) % p; + n /= p; + r /= p; + } + return result; +} + +/** + * @brief Computes C(n,r) % p with precomputed factorials for n < p. + * @param n total items. + * @param r chosen items. + * @param p prime modulus. + * @returns nCr modulo p. + */ +long long ncr_mod_prime(long long n, long long r, int p) { + if (r < 0 || r > n) { + return 0; + } + if (n >= p) { + return ncr_lucas(n, r, p); + } + std::vector fact, inv_fact; + build_factorials(p, fact, inv_fact); + return ncr_small(static_cast(n), static_cast(r), p, fact, inv_fact); +} + +/** + * @brief Self-tests for nCr modulo prime functions. + * @returns void. + */ +static void test() { + assert(ncr_mod_prime(5, 2, 13) == 10); + assert(ncr_mod_prime(10, 3, 17) == 1); + assert(ncr_mod_prime(10, 11, 17) == 0); + assert(ncr_lucas(1000, 200, 13) == ncr_mod_prime(1000, 200, 13)); + assert(ncr_lucas(1000000000000LL, 12345LL, 2) == 0); + + std::cout << "All tests passed!\n"; +} + +int main() { + test(); + return 0; +} diff --git a/math/pollard_rho_factorization.cpp b/math/pollard_rho_factorization.cpp new file mode 100644 index 0000000000..e0bdb69cba --- /dev/null +++ b/math/pollard_rho_factorization.cpp @@ -0,0 +1,205 @@ +/** + * @file pollard_rho_factorization.cpp + * @brief Integer factorization with Pollard's Rho + Miller-Rabin. + * @details + * Uses deterministic Miller-Rabin primality test and Pollard's Rho cycle + * detection method to split composite numbers. Expected running time is around + * O(n^(1/4)) for finding a non-trivial factor in practice. 
+ * @author OpenAI + * @see https://en.wikipedia.org/wiki/Pollard%27s_rho_algorithm + */ + +#include +#include +#include +#include +#include + +/** + * @brief Computes (a*b) % mod safely for 64-bit values. + * @param a first factor. + * @param b second factor. + * @param mod modulus. + * @returns modular product. + */ +unsigned long long mul_mod(unsigned long long a, unsigned long long b, + unsigned long long mod) { + return static_cast((__int128)a * b % mod); +} + +/** + * @brief Computes (base^exp) % mod. + * @param base base value. + * @param exp exponent. + * @param mod modulus. + * @returns modular power. + */ +unsigned long long pow_mod(unsigned long long base, unsigned long long exp, + unsigned long long mod) { + unsigned long long result = 1ULL % mod; + base %= mod; + while (exp > 0) { + if (exp & 1ULL) { + result = mul_mod(result, base, mod); + } + base = mul_mod(base, base, mod); + exp >>= 1ULL; + } + return result; +} + +/** + * @brief Checks if n is prime using deterministic Miller-Rabin witnesses. + * @param n integer to test. + * @returns true if n is prime, false otherwise. 
+ */ +bool is_prime(unsigned long long n) { + if (n < 2) { + return false; + } + for (unsigned long long p : std::vector{2ULL, 3ULL, 5ULL, + 7ULL, 11ULL, + 13ULL, 17ULL, + 19ULL, 23ULL, + 29ULL, 31ULL, + 37ULL}) { + if (n % p == 0ULL) { + return n == p; + } + } + + unsigned long long d = n - 1; + int s = 0; + while ((d & 1ULL) == 0ULL) { + d >>= 1ULL; + ++s; + } + + const std::vector witnesses = { + 2ULL, 3ULL, 5ULL, 7ULL, 11ULL, 13ULL, + 17ULL, 19ULL, 23ULL, 29ULL, 31ULL, 37ULL}; + + for (unsigned long long a : witnesses) { + if (a >= n) { + continue; + } + unsigned long long x = pow_mod(a, d, n); + if (x == 1ULL || x == n - 1) { + continue; + } + bool composite = true; + for (int r = 1; r < s; ++r) { + x = mul_mod(x, x, n); + if (x == n - 1) { + composite = false; + break; + } + } + if (composite) { + return false; + } + } + return true; +} + +/** + * @brief Polynomial used by Pollard's Rho iteration. + * @param x current value. + * @param c constant parameter. + * @param mod modulus. + * @returns next sequence value. + */ +unsigned long long f(unsigned long long x, unsigned long long c, + unsigned long long mod) { + return (mul_mod(x, x, mod) + c) % mod; +} + +/** + * @brief Finds a non-trivial factor of n with Pollard's Rho. + * @param n odd composite number. + * @returns one factor in range (1, n). + */ +unsigned long long pollard_rho(unsigned long long n) { + if (n % 2ULL == 0ULL) { + return 2ULL; + } + if (n % 3ULL == 0ULL) { + return 3ULL; + } + + for (unsigned long long c = 1ULL; c < 50ULL; ++c) { + unsigned long long x = 2ULL; + unsigned long long y = 2ULL; + unsigned long long d = 1ULL; + + while (d == 1ULL) { + x = f(x, c, n); + y = f(f(y, c, n), c, n); + unsigned long long diff = x > y ? x - y : y - x; + d = std::gcd(diff, n); + } + if (d != n) { + return d; + } + } + return n; +} + +/** + * @brief Recursively decomposes n into prime factors. + * @param n number to factor. + * @param factors output list of prime factors. + * @returns void. 
+ */ +void factorize(unsigned long long n, std::vector &factors) { + if (n == 1ULL) { + return; + } + if (is_prime(n)) { + factors.push_back(n); + return; + } + unsigned long long divisor = pollard_rho(n); + if (divisor == n) { + factors.push_back(n); + return; + } + factorize(divisor, factors); + factorize(n / divisor, factors); +} + +/** + * @brief Self-tests for Pollard's Rho factorization. + * @returns void. + */ +static void test() { + std::vector f1; + factorize(1ULL, f1); + assert(f1.empty()); + + std::vector f2; + factorize(97ULL, f2); + std::sort(f2.begin(), f2.end()); + assert((f2 == std::vector{97ULL})); + + std::vector f3; + factorize(8051ULL, f3); // 83 * 97 + std::sort(f3.begin(), f3.end()); + assert((f3 == std::vector{83ULL, 97ULL})); + + std::vector f4; + factorize(1000000000039ULL, f4); // prime + assert(f4.size() == 1 && f4[0] == 1000000000039ULL); + + std::vector f5; + factorize(1234567890ULL, f5); + std::sort(f5.begin(), f5.end()); + assert((f5 == std::vector{2ULL, 3ULL, 3ULL, 5ULL, 3607ULL, 3803ULL})); + + std::cout << "All tests passed!\n"; +} + +int main() { + test(); + return 0; +} diff --git a/math/segmented_sieve.cpp b/math/segmented_sieve.cpp new file mode 100644 index 0000000000..9a46eb295d --- /dev/null +++ b/math/segmented_sieve.cpp @@ -0,0 +1,95 @@ +/** + * @file segmented_sieve.cpp + * @brief Finds primes in a range [L, R] using segmented sieve. + * @details + * First computes all primes up to sqrt(R), then marks multiples of those primes + * in a local segment [L, R]. Useful when R is large but segment width is small. + * Time complexity is O((R-L+1) log log R + sqrt(R) log log sqrt(R)) and + * memory usage is O(R-L+1 + sqrt(R)). + * @author OpenAI + * @see https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes#Segmented_sieve + */ + +#include +#include +#include +#include +#include + +/** + * @brief Computes base primes up to n by regular sieve. + * @param n upper bound. + * @returns primes <= n. 
+ */ +std::vector simple_sieve(long long n) { + if (n < 2) { + return {}; + } + std::vector is_prime(static_cast(n + 1), true); + is_prime[0] = false; + is_prime[1] = false; + for (long long p = 2; p * p <= n; ++p) { + if (is_prime[static_cast(p)]) { + for (long long m = p * p; m <= n; m += p) { + is_prime[static_cast(m)] = false; + } + } + } + std::vector primes; + for (long long i = 2; i <= n; ++i) { + if (is_prime[static_cast(i)]) { + primes.push_back(i); + } + } + return primes; +} + +/** + * @brief Finds all primes in inclusive interval [left, right]. + * @param left left endpoint of interval. + * @param right right endpoint of interval. + * @returns all primes in the specified range. + */ +std::vector segmented_sieve(long long left, long long right) { + if (right < 2 || left > right) { + return {}; + } + left = std::max(2LL, left); + const long long limit = static_cast(std::sqrt(static_cast(right))); + const std::vector base_primes = simple_sieve(limit); + + std::vector is_prime(static_cast(right - left + 1), true); + for (long long p : base_primes) { + long long start = std::max(p * p, ((left + p - 1) / p) * p); + for (long long x = start; x <= right; x += p) { + is_prime[static_cast(x - left)] = false; + } + } + + std::vector primes; + for (long long i = left; i <= right; ++i) { + if (is_prime[static_cast(i - left)]) { + primes.push_back(i); + } + } + return primes; +} + +/** + * @brief Self-tests for segmented sieve. + * @returns void. 
+ */ +static void test() { + assert(segmented_sieve(0, 1).empty()); + assert((segmented_sieve(2, 2) == std::vector{2})); + assert((segmented_sieve(1, 10) == std::vector{2, 3, 5, 7})); + assert((segmented_sieve(14, 16).empty())); + assert((segmented_sieve(100, 120) == std::vector{101, 103, 107, 109, 113})); + + std::cout << "All tests passed!\n"; +} + +int main() { + test(); + return 0; +} diff --git a/math/sieve_of_eratosthenes.cpp b/math/sieve_of_eratosthenes.cpp index 29115d306d..dc4615b3ab 100644 --- a/math/sieve_of_eratosthenes.cpp +++ b/math/sieve_of_eratosthenes.cpp @@ -1,122 +1,70 @@ /** - * @file - * @brief Prime Numbers using [Sieve of - * Eratosthenes](https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes) + * @file sieve_of_eratosthenes.cpp + * @brief Generates all prime numbers up to N with sieve method. * @details - * Sieve of Eratosthenes is an algorithm that finds all the primes - * between 2 and N. - * - * Time Complexity : \f$O(N \cdot\log \log N)\f$ - *
Space Complexity : \f$O(N)\f$ - * - * @see primes_up_to_billion.cpp prime_numbers.cpp + * Builds a boolean table and marks multiples of each discovered prime. + * Remaining unmarked indices are prime. This implementation runs in + * O(N log log N) time and uses O(N) space. + * @author OpenAI + * @see https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes */ -#include -#include /// for assert -#include /// for IO operations -#include /// for std::vector +#include +#include +#include +#include /** - * @namespace math - * @brief Mathematical algorithms + * @brief Computes all prime numbers in [2, n]. + * @param n upper bound for the sieve. + * @returns list of all primes less than or equal to n. */ -namespace math { -/** - * @namespace sieve_of_eratosthenes - * @brief Functions for finding Prime Numbers using Sieve of Eratosthenes - */ -namespace sieve_of_eratosthenes { -/** - * @brief Function to sieve out the primes - * @details - * This function finds all the primes between 2 and N using the Sieve of - * Eratosthenes algorithm. It starts by assuming all numbers (except zero and - * one) are prime and then iteratively marks the multiples of each prime as - * non-prime. - * - * Contains a common optimization to start eliminating multiples of - * a prime p starting from p * p since all of the lower multiples - * have been already eliminated. 
- * @param N number till which primes are to be found - * @return is_prime a vector of `N + 1` booleans identifying if `i`^th number is - * a prime or not - */ -std::vector sieve(uint32_t N) { - std::vector is_prime(N + 1, true); // Initialize all as prime numbers - is_prime[0] = is_prime[1] = false; // 0 and 1 are not prime numbers - - for (uint32_t i = 2; i * i <= N; i++) { - if (is_prime[i]) { - for (uint32_t j = i * i; j <= N; j += i) { - is_prime[j] = false; +std::vector sieve_of_eratosthenes(int n) { + if (n < 2) { + return {}; + } + std::vector is_prime(n + 1, true); + is_prime[0] = false; + is_prime[1] = false; + for (int p = 2; p * p <= n; ++p) { + if (is_prime[p]) { + for (int multiple = p * p; multiple <= n; multiple += p) { + is_prime[multiple] = false; } } } - return is_prime; -} -/** - * @brief Function to print the prime numbers - * @param N number till which primes are to be found - * @param is_prime a vector of `N + 1` booleans identifying if `i`^th number is - * a prime or not - */ -void print(uint32_t N, const std::vector &is_prime) { - for (uint32_t i = 2; i <= N; i++) { + std::vector primes; + for (int i = 2; i <= n; ++i) { if (is_prime[i]) { - std::cout << i << ' '; + primes.push_back(i); } } - std::cout << std::endl; + return primes; } -} // namespace sieve_of_eratosthenes -} // namespace math - /** - * @brief Self-test implementations - * @return void + * @brief Validates sieve implementation with representative tests. + * @returns void. 
*/ -static void tests() { - std::vector is_prime_1 = - math::sieve_of_eratosthenes::sieve(static_cast(10)); - std::vector is_prime_2 = - math::sieve_of_eratosthenes::sieve(static_cast(20)); - std::vector is_prime_3 = - math::sieve_of_eratosthenes::sieve(static_cast(100)); +static void test() { + assert(sieve_of_eratosthenes(0).empty()); + assert(sieve_of_eratosthenes(1).empty()); - std::vector expected_1{false, false, true, true, false, true, - false, true, false, false, false}; - assert(is_prime_1 == expected_1); + std::vector p10 = sieve_of_eratosthenes(10); + assert((p10 == std::vector{2, 3, 5, 7})); - std::vector expected_2{false, false, true, true, false, true, - false, true, false, false, false, true, - false, true, false, false, false, true, - false, true, false}; - assert(is_prime_2 == expected_2); + std::vector p2 = sieve_of_eratosthenes(2); + assert((p2 == std::vector{2})); - std::vector expected_3{ - false, false, true, true, false, true, false, true, false, false, - false, true, false, true, false, false, false, true, false, true, - false, false, false, true, false, false, false, false, false, true, - false, true, false, false, false, false, false, true, false, false, - false, true, false, true, false, false, false, true, false, false, - false, false, false, true, false, false, false, false, false, true, - false, true, false, false, false, false, false, true, false, false, - false, true, false, true, false, false, false, false, false, true, - false, false, false, true, false, false, false, false, false, true, - false, false, false, false, false, false, false, true, false, false, - false}; - assert(is_prime_3 == expected_3); + std::vector p100 = sieve_of_eratosthenes(100); + assert(p100.size() == 25); + assert(p100.back() == 97); + std::cout << "All tests passed!\n"; } -/** - * @brief Main function - * @returns 0 on exit - */ int main() { - tests(); + test(); return 0; }