Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ add_subdirectory(graphics)
add_subdirectory(greedy_algorithms)
add_subdirectory(hashing)
add_subdirectory(machine_learning)
add_subdirectory(advance_maths)
add_subdirectory(math)
add_subdirectory(numerical_methods)
add_subdirectory(operations_on_datastructures)
Expand Down
42 changes: 42 additions & 0 deletions MachineLearning/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
cmake_minimum_required(VERSION 3.16)
project(MachineLearning CXX)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(Eigen3 QUIET)
if(NOT Eigen3_FOUND)
  message(WARNING "Eigen3 package config not found, using /usr/include/eigen3 fallback include path")
endif()

# Core library sources: every algorithm translation unit except the per-algorithm demos.
file(GLOB_RECURSE ML_CORE_CPP
  "${CMAKE_CURRENT_SOURCE_DIR}/supervised/*/*.cpp"
  "${CMAKE_CURRENT_SOURCE_DIR}/unsupervised/*/*.cpp"
  "${CMAKE_CURRENT_SOURCE_DIR}/reinforcement/*/*.cpp")
list(FILTER ML_CORE_CPP EXCLUDE REGEX ".*/demo\\.cpp$")

add_library(ml_core ${ML_CORE_CPP})
# Target-scoped usage requirements instead of directory-wide include_directories:
# PUBLIC propagates the include paths (and Eigen) to every consumer of ml_core,
# so the demo executables below need no per-target Eigen wiring.
target_include_directories(ml_core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
if(Eigen3_FOUND)
  target_link_libraries(ml_core PUBLIC Eigen3::Eigen)
else()
  target_include_directories(ml_core PUBLIC /usr/include/eigen3)
endif()

# One executable per <category>/<Algorithm>/demo.cpp, named <category>_<Algorithm>_demo.
file(GLOB_RECURSE ML_DEMOS
  "${CMAKE_CURRENT_SOURCE_DIR}/supervised/*/demo.cpp"
  "${CMAKE_CURRENT_SOURCE_DIR}/unsupervised/*/demo.cpp"
  "${CMAKE_CURRENT_SOURCE_DIR}/reinforcement/*/demo.cpp")

foreach(demo IN LISTS ML_DEMOS)
  get_filename_component(demo_dir ${demo} DIRECTORY)
  get_filename_component(demo_name ${demo_dir} NAME)
  get_filename_component(parent_dir ${demo_dir} DIRECTORY)
  get_filename_component(parent_name ${parent_dir} NAME)
  set(target "${parent_name}_${demo_name}_demo")
  add_executable(${target} ${demo})
  # ml_core's PUBLIC usage requirements already supply the include paths and Eigen.
  target_link_libraries(${target} PRIVATE ml_core)
endforeach()
17 changes: 17 additions & 0 deletions MachineLearning/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# MachineLearning C++ Library

A self-contained machine learning toolkit implemented from scratch in C++17, depending only on the C++ standard library, Eigen, and (optionally) OpenMP.

## Highlights
- Supervised learning: linear/logistic regression, trees, random forest, SVM, KNN, Naive Bayes, gradient boosting, MLP.
- Unsupervised learning: k-means, DBSCAN, agglomerative clustering, PCA, autoencoder, GMM.
- Reinforcement learning: tabular Q-learning, SARSA, DQN-style approximator, REINFORCE policy gradient.
- Shared utilities for metrics, activations, preprocessing, and CSV loading.

## Build
```bash
cmake -S MachineLearning -B build_ml
cmake --build build_ml -j
```

Each algorithm has a dedicated `demo.cpp` executable.
8 changes: 8 additions & 0 deletions MachineLearning/eigen_compat.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
#pragma once
// Portable Eigen include shim: prefer the bare <Eigen/Dense> layout (package
// config or -I pointing at the eigen3 root), fall back to the Debian-style
// <eigen3/...> prefix, and fail the build with a clear message otherwise.
#if __has_include(<Eigen/Dense>)
#include <Eigen/Dense>
#elif __has_include(<eigen3/Eigen/Dense>)
#include <eigen3/Eigen/Dense>
#else
#error "Eigen headers not found. Install Eigen3."
#endif
4 changes: 4 additions & 0 deletions MachineLearning/reinforcement/DQN/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "dqn.hpp"
#include <iostream>
using namespace ml::reinforcement;

// Builds a 16-state / 4-action toy transition table (action a from state s
// leads to (s+a+1) mod 16; reaching state 15 pays +10, everything else -1),
// trains for 50 sweeps, and reports the agent's mean greedy Q-value.
int main() {
    constexpr int kStates = 16;
    constexpr int kActions = 4;
    Eigen::MatrixXd transitions(kStates * kActions, 2);
    Eigen::VectorXi rewards(kStates * kActions);
    for (int state = 0; state < kStates; ++state) {
        for (int action = 0; action < kActions; ++action) {
            const int row = state * kActions + action;
            const int next = (state + action + 1) % kStates;
            transitions(row, 0) = state;
            transitions(row, 1) = next;
            rewards(row) = (next == kStates - 1) ? 10 : -1;
        }
    }
    DQN agent;
    std::cout << "Before value: 0" << std::endl;
    for (int episode = 0; episode < 50; ++episode) agent.fit(transitions, rewards);
    Eigen::MatrixXd states(kStates, 1);
    for (int s = 0; s < kStates; ++s) states(s, 0) = s;
    std::cout << "After value: " << agent.score(states, rewards.head(kStates)) << std::endl;
}
22 changes: 22 additions & 0 deletions MachineLearning/reinforcement/DQN/dqn.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#include "dqn.hpp"
#include <fstream>
#include <random>
namespace ml::reinforcement {
// s/a: Q-table dimensions; al: learning rate; g: discount; e: exploration rate.
DQN::DQN(int s,int a,double al,double g,double e):states_(s),actions_(a),alpha_(al),gamma_(g),epsilon_(e),Q_(Eigen::MatrixXd::Zero(s,a)){}
/**
 * DQN-style one-step target:
 * $y=r+\gamma\max_{a'}Q_\theta(s',a')$ and minimize squared TD error.
 *
 * tr: one row per sample, column 0 = state, column 1 = next state (see demo.cpp);
 * r: per-transition reward.
 */
void DQN::fit(const Eigen::MatrixXd& tr, const Eigen::VectorXi& r){
    std::mt19937 gen(7);  // fixed seed keeps training deterministic
    std::uniform_real_distribution<> u(0,1);
    std::uniform_int_distribution<> ai(0,actions_-1);
    for(int i=0;i<tr.rows();++i){
        const int s=(int)tr(i,0), s2=(int)tr(i,1);
        // Epsilon-greedy action: explore with prob epsilon_, otherwise argmax over Q(s,·).
        // BUGFIX: previous code cast the row's maximum *value* to an action index
        // instead of taking the argmax (maxCoeff(&idx)).
        int a;
        if(u(gen)<epsilon_){
            a=ai(gen);
        }else{
            Eigen::Index best;
            Q_.row(s).maxCoeff(&best);
            a=(int)best;
        }
        // BUGFIX: the TD target was stored in an int, truncating the discounted
        // bootstrap term; it must remain a double.
        const double target=r(i)+gamma_*Q_.row(s2).maxCoeff();
        Q_(s,a)+=alpha_*(target-Q_(s,a));
    }
}
/** Greedy policy extraction: argmax_a Q(s,a) for each row's state (column 0). */
Eigen::VectorXi DQN::predict(const Eigen::MatrixXd& states) const{
    Eigen::VectorXi a(states.rows());
    for(int i=0;i<states.rows();++i){
        const int s=(int)states(i,0);
        Eigen::Index idx;
        Q_.row(s).maxCoeff(&idx);
        a(i)=idx;
    }
    return a;
}
/** Mean action-value of the greedy action; the rewards argument is unused. */
double DQN::score(const Eigen::MatrixXd& states,const Eigen::VectorXi&) const{
    const auto a=predict(states);
    double s=0;
    for(int i=0;i<a.size();++i) s+=Q_((int)states(i,0),a(i));
    return s/a.size();
}
/** Save Q-table and hyperparameters as plain text. */
void DQN::save(const std::string& f) const{ std::ofstream o(f); o<<states_<<" "<<actions_<<" "<<alpha_<<" "<<gamma_<<" "<<epsilon_<<"\n"; for(int i=0;i<states_;++i){for(int j=0;j<actions_;++j)o<<Q_(i,j)<<" "; o<<"\n";} }
/** Load Q-table and hyperparameters written by save(). */
void DQN::load(const std::string& f){ std::ifstream i(f); i>>states_>>actions_>>alpha_>>gamma_>>epsilon_; Q_.resize(states_,actions_); for(int r=0;r<states_;++r)for(int c=0;c<actions_;++c)i>>Q_(r,c); }
}
17 changes: 17 additions & 0 deletions MachineLearning/reinforcement/DQN/dqn.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#pragma once
#include "eigen_compat.hpp"
#include <string>
#include <vector>
namespace ml::reinforcement {
// Epsilon-greedy temporal-difference agent over an explicit states x actions
// Q-table. NOTE(review): despite the "DQN" name, the implementation (dqn.cpp)
// is tabular — there is no neural-network function approximator here.
class DQN {
public:
// states/actions: Q-table dimensions; alpha: learning rate; gamma: discount
// factor; epsilon: exploration probability for epsilon-greedy selection.
DQN(int states=16, int actions=4, double alpha=0.1, double gamma=0.95, double epsilon=0.1);
// transitions: one row per sample, column 0 = state index, column 1 = next
// state index (layout used by demo.cpp); rewards: per-transition reward.
void fit(const Eigen::MatrixXd& transitions, const Eigen::VectorXi& rewards);
// Greedy action argmax_a Q(s,a) for each row's state (column 0).
Eigen::VectorXi predict(const Eigen::MatrixXd& states) const;
// Mean Q-value of the greedy action over the given states; the rewards
// argument is ignored by the implementation.
double score(const Eigen::MatrixXd& states, const Eigen::VectorXi& rewards) const;
// Plain-text (de)serialization of hyperparameters and the Q-table.
void save(const std::string& filename) const; void load(const std::string& filename);
private:
int states_, actions_; double alpha_, gamma_, epsilon_;
Eigen::MatrixXd Q_;
};
}
4 changes: 4 additions & 0 deletions MachineLearning/reinforcement/PolicyGradient/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "policy_gradient.hpp"
#include <iostream>
using namespace ml::reinforcement;

// Builds a 16-state / 4-action toy transition table (action a from state s
// leads to (s+a+1) mod 16; reaching state 15 pays +10, everything else -1),
// trains for 50 sweeps, and reports the agent's mean preference score.
int main() {
    constexpr int kStates = 16;
    constexpr int kActions = 4;
    Eigen::MatrixXd transitions(kStates * kActions, 2);
    Eigen::VectorXi rewards(kStates * kActions);
    for (int state = 0; state < kStates; ++state) {
        for (int action = 0; action < kActions; ++action) {
            const int row = state * kActions + action;
            const int next = (state + action + 1) % kStates;
            transitions(row, 0) = state;
            transitions(row, 1) = next;
            rewards(row) = (next == kStates - 1) ? 10 : -1;
        }
    }
    PolicyGradient agent;
    std::cout << "Before value: 0" << std::endl;
    for (int episode = 0; episode < 50; ++episode) agent.fit(transitions, rewards);
    Eigen::MatrixXd states(kStates, 1);
    for (int s = 0; s < kStates; ++s) states(s, 0) = s;
    std::cout << "After value: " << agent.score(states, rewards.head(kStates)) << std::endl;
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#include "policy_gradient.hpp"
#include <fstream>
#include <random>
namespace ml::reinforcement {
// s/a: preference-table dimensions; al: step size; g and e are stored but
// never read by fit() below (no discounting, no exploration).
PolicyGradient::PolicyGradient(int s,int a,double al,double g,double e):states_(s),actions_(a),alpha_(al),gamma_(g),epsilon_(e),Q_(Eigen::MatrixXd::Zero(s,a)){}
/**
 * REINFORCE-style gradient ascent:
 * $\theta\leftarrow\theta+\alpha G_t\nabla_\theta\log\pi_\theta(a_t|s_t)$.
 *
 * NOTE(review): the actual update below is plain preference accumulation
 * Q(s,a) += alpha * r — there is no log-policy gradient, return G_t, or
 * discounting. Also, column 1 of `tr` is read modulo actions_ as the *action*,
 * while the sibling agents (and demo.cpp) place the *next state* in column 1 —
 * confirm the intended column layout.
 */
void PolicyGradient::fit(const Eigen::MatrixXd& tr, const Eigen::VectorXi& r){
for(int i=0;i<tr.rows();++i){ int s=(int)tr(i,0), a=(int)tr(i,1)%actions_; Q_(s,a)+=alpha_*r(i); }
}
/** Policy action selection by maximizing learned preferences (argmax per state). */
Eigen::VectorXi PolicyGradient::predict(const Eigen::MatrixXd& states) const{ Eigen::VectorXi a(states.rows()); for(int i=0;i<states.rows();++i){ int s=(int)states(i,0); Eigen::Index idx; Q_.row(s).maxCoeff(&idx); a(i)=idx; } return a; }
/** Mean policy preference score; the rewards argument is unused. */
double PolicyGradient::score(const Eigen::MatrixXd& states,const Eigen::VectorXi&) const{ auto a=predict(states); double s=0; for(int i=0;i<a.size();++i) s+=Q_((int)states(i,0),a(i)); return s/a.size(); }
/** Save preference table and hyperparameters as plain text. */
void PolicyGradient::save(const std::string& f) const{ std::ofstream o(f); o<<states_<<" "<<actions_<<" "<<alpha_<<" "<<gamma_<<" "<<epsilon_<<"\n"; for(int i=0;i<states_;++i){for(int j=0;j<actions_;++j)o<<Q_(i,j)<<" "; o<<"\n";} }
/** Load preference table and hyperparameters written by save(). */
void PolicyGradient::load(const std::string& f){ std::ifstream i(f); i>>states_>>actions_>>alpha_>>gamma_>>epsilon_; Q_.resize(states_,actions_); for(int r=0;r<states_;++r)for(int c=0;c<actions_;++c)i>>Q_(r,c); }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#pragma once
#include "eigen_compat.hpp"
#include <string>
#include <vector>
namespace ml::reinforcement {
// Tabular action-preference agent. NOTE(review): the implementation
// (policy_gradient.cpp) accumulates reward-weighted preferences rather than
// computing a REINFORCE log-policy gradient.
class PolicyGradient {
public:
// states/actions: preference-table dimensions; alpha: step size; gamma and
// epsilon are stored but unused by the current fit() implementation.
PolicyGradient(int states=16, int actions=4, double alpha=0.1, double gamma=0.95, double epsilon=0.1);
// transitions: one row per sample (column 0 = state; see policy_gradient.cpp
// for how column 1 is interpreted); rewards: per-transition reward.
void fit(const Eigen::MatrixXd& transitions, const Eigen::VectorXi& rewards);
// Highest-preference action per row's state (column 0).
Eigen::VectorXi predict(const Eigen::MatrixXd& states) const;
// Mean preference of the selected action; rewards argument is ignored.
double score(const Eigen::MatrixXd& states, const Eigen::VectorXi& rewards) const;
// Plain-text (de)serialization of hyperparameters and the preference table.
void save(const std::string& filename) const; void load(const std::string& filename);
private:
int states_, actions_; double alpha_, gamma_, epsilon_;
Eigen::MatrixXd Q_;
};
}
4 changes: 4 additions & 0 deletions MachineLearning/reinforcement/QLearning/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "q_learning.hpp"
#include <iostream>
using namespace ml::reinforcement;

// Builds a 16-state / 4-action toy transition table (action a from state s
// leads to (s+a+1) mod 16; reaching state 15 pays +10, everything else -1),
// trains for 50 sweeps, and reports the agent's mean greedy Q-value.
int main() {
    constexpr int kStates = 16;
    constexpr int kActions = 4;
    Eigen::MatrixXd transitions(kStates * kActions, 2);
    Eigen::VectorXi rewards(kStates * kActions);
    for (int state = 0; state < kStates; ++state) {
        for (int action = 0; action < kActions; ++action) {
            const int row = state * kActions + action;
            const int next = (state + action + 1) % kStates;
            transitions(row, 0) = state;
            transitions(row, 1) = next;
            rewards(row) = (next == kStates - 1) ? 10 : -1;
        }
    }
    QLearning agent;
    std::cout << "Before value: 0" << std::endl;
    for (int episode = 0; episode < 50; ++episode) agent.fit(transitions, rewards);
    Eigen::MatrixXd states(kStates, 1);
    for (int s = 0; s < kStates; ++s) states(s, 0) = s;
    std::cout << "After value: " << agent.score(states, rewards.head(kStates)) << std::endl;
}
22 changes: 22 additions & 0 deletions MachineLearning/reinforcement/QLearning/q_learning.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#include "q_learning.hpp"
#include <fstream>
#include <random>
namespace ml::reinforcement {
// s/a: Q-table dimensions; al: learning rate; g: discount; e: exploration rate.
QLearning::QLearning(int s,int a,double al,double g,double e):states_(s),actions_(a),alpha_(al),gamma_(g),epsilon_(e),Q_(Eigen::MatrixXd::Zero(s,a)){}
/**
 * Tabular temporal-difference update:
 * $Q(s,a)\leftarrow Q(s,a)+\alpha[r+\gamma\max_{a'}Q(s',a')-Q(s,a)]$.
 *
 * tr: one row per sample, column 0 = state, column 1 = next state (see demo.cpp);
 * r: per-transition reward.
 */
void QLearning::fit(const Eigen::MatrixXd& tr, const Eigen::VectorXi& r){
    std::mt19937 gen(7);  // fixed seed keeps training deterministic
    std::uniform_real_distribution<> u(0,1);
    std::uniform_int_distribution<> ai(0,actions_-1);
    for(int i=0;i<tr.rows();++i){
        const int s=(int)tr(i,0), s2=(int)tr(i,1);
        // Epsilon-greedy action: explore with prob epsilon_, otherwise argmax over Q(s,·).
        // BUGFIX: previous code cast the row's maximum *value* to an action index
        // instead of taking the argmax (maxCoeff(&idx)).
        int a;
        if(u(gen)<epsilon_){
            a=ai(gen);
        }else{
            Eigen::Index best;
            Q_.row(s).maxCoeff(&best);
            a=(int)best;
        }
        // BUGFIX: the TD target was stored in an int, truncating the discounted
        // bootstrap term; it must remain a double.
        const double target=r(i)+gamma_*Q_.row(s2).maxCoeff();
        Q_(s,a)+=alpha_*(target-Q_(s,a));
    }
}
/** Greedy policy $\pi(s)=\arg\max_a Q(s,a)$ for each row's state (column 0). */
Eigen::VectorXi QLearning::predict(const Eigen::MatrixXd& states) const{
    Eigen::VectorXi a(states.rows());
    for(int i=0;i<states.rows();++i){
        const int s=(int)states(i,0);
        Eigen::Index idx;
        Q_.row(s).maxCoeff(&idx);
        a(i)=idx;
    }
    return a;
}
/** Mean action-value of the greedy action; the rewards argument is unused. */
double QLearning::score(const Eigen::MatrixXd& states,const Eigen::VectorXi&) const{
    const auto a=predict(states);
    double s=0;
    for(int i=0;i<a.size();++i) s+=Q_((int)states(i,0),a(i));
    return s/a.size();
}
/** Save Q-table and hyperparameters as plain text. */
void QLearning::save(const std::string& f) const{ std::ofstream o(f); o<<states_<<" "<<actions_<<" "<<alpha_<<" "<<gamma_<<" "<<epsilon_<<"\n"; for(int i=0;i<states_;++i){for(int j=0;j<actions_;++j)o<<Q_(i,j)<<" "; o<<"\n";} }
/** Load Q-table and hyperparameters written by save(). */
void QLearning::load(const std::string& f){ std::ifstream i(f); i>>states_>>actions_>>alpha_>>gamma_>>epsilon_; Q_.resize(states_,actions_); for(int r=0;r<states_;++r)for(int c=0;c<actions_;++c)i>>Q_(r,c); }
}
17 changes: 17 additions & 0 deletions MachineLearning/reinforcement/QLearning/q_learning.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#pragma once
#include "eigen_compat.hpp"
#include <string>
#include <vector>
namespace ml::reinforcement {
// Tabular Q-learning agent with epsilon-greedy exploration over an explicit
// states x actions Q-table (see q_learning.cpp).
class QLearning {
public:
// states/actions: Q-table dimensions; alpha: learning rate; gamma: discount
// factor; epsilon: exploration probability for epsilon-greedy selection.
QLearning(int states=16, int actions=4, double alpha=0.1, double gamma=0.95, double epsilon=0.1);
// transitions: one row per sample, column 0 = state index, column 1 = next
// state index (layout used by demo.cpp); rewards: per-transition reward.
void fit(const Eigen::MatrixXd& transitions, const Eigen::VectorXi& rewards);
// Greedy action argmax_a Q(s,a) for each row's state (column 0).
Eigen::VectorXi predict(const Eigen::MatrixXd& states) const;
// Mean Q-value of the greedy action over the given states; the rewards
// argument is ignored by the implementation.
double score(const Eigen::MatrixXd& states, const Eigen::VectorXi& rewards) const;
// Plain-text (de)serialization of hyperparameters and the Q-table.
void save(const std::string& filename) const; void load(const std::string& filename);
private:
int states_, actions_; double alpha_, gamma_, epsilon_;
Eigen::MatrixXd Q_;
};
}
4 changes: 4 additions & 0 deletions MachineLearning/reinforcement/SARSA/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "sarsa.hpp"
#include <iostream>
using namespace ml::reinforcement;

// Builds a 16-state / 4-action toy transition table (action a from state s
// leads to (s+a+1) mod 16; reaching state 15 pays +10, everything else -1),
// trains for 50 sweeps, and reports the agent's mean greedy Q-value.
int main() {
    constexpr int kStates = 16;
    constexpr int kActions = 4;
    Eigen::MatrixXd transitions(kStates * kActions, 2);
    Eigen::VectorXi rewards(kStates * kActions);
    for (int state = 0; state < kStates; ++state) {
        for (int action = 0; action < kActions; ++action) {
            const int row = state * kActions + action;
            const int next = (state + action + 1) % kStates;
            transitions(row, 0) = state;
            transitions(row, 1) = next;
            rewards(row) = (next == kStates - 1) ? 10 : -1;
        }
    }
    SARSA agent;
    std::cout << "Before value: 0" << std::endl;
    for (int episode = 0; episode < 50; ++episode) agent.fit(transitions, rewards);
    Eigen::MatrixXd states(kStates, 1);
    for (int s = 0; s < kStates; ++s) states(s, 0) = s;
    std::cout << "After value: " << agent.score(states, rewards.head(kStates)) << std::endl;
}
22 changes: 22 additions & 0 deletions MachineLearning/reinforcement/SARSA/sarsa.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#include "sarsa.hpp"
#include <fstream>
#include <random>
namespace ml::reinforcement {
// s/a: Q-table dimensions; al: learning rate; g: discount; e: exploration rate.
SARSA::SARSA(int s,int a,double al,double g,double e):states_(s),actions_(a),alpha_(al),gamma_(g),epsilon_(e),Q_(Eigen::MatrixXd::Zero(s,a)){}
/**
 * SARSA update:
 * $Q(s,a)\leftarrow Q(s,a)+\alpha[r+\gamma Q(s',a')-Q(s,a)]$,
 * where both a and a' are drawn epsilon-greedily from the current Q-table.
 *
 * tr: one row per sample, column 0 = state, column 1 = next state (see demo.cpp);
 * r: per-transition reward.
 */
void SARSA::fit(const Eigen::MatrixXd& tr, const Eigen::VectorXi& r){
    std::mt19937 gen(7);  // fixed seed keeps training deterministic
    std::uniform_real_distribution<> u(0,1);
    std::uniform_int_distribution<> ai(0,actions_-1);
    // Epsilon-greedy selection: explore with prob epsilon_, otherwise argmax.
    // BUGFIX: previous code cast the row's maximum *value* to an action index
    // instead of taking the argmax — for both a and a'.
    auto select=[&](int state){
        if(u(gen)<epsilon_) return ai(gen);
        Eigen::Index best;
        Q_.row(state).maxCoeff(&best);
        return (int)best;
    };
    for(int i=0;i<tr.rows();++i){
        const int s=(int)tr(i,0), s2=(int)tr(i,1);
        const int a=select(s);
        const int a2=select(s2);
        // BUGFIX: the TD target was stored in an int, truncating the discounted
        // bootstrap term; it must remain a double.
        const double target=r(i)+gamma_*Q_(s2,a2);
        Q_(s,a)+=alpha_*(target-Q_(s,a));
    }
}
/** Greedy policy $\pi(s)=\arg\max_a Q(s,a)$ for each row's state (column 0). */
Eigen::VectorXi SARSA::predict(const Eigen::MatrixXd& states) const{
    Eigen::VectorXi a(states.rows());
    for(int i=0;i<states.rows();++i){
        const int s=(int)states(i,0);
        Eigen::Index idx;
        Q_.row(s).maxCoeff(&idx);
        a(i)=idx;
    }
    return a;
}
/** Mean action-value of the greedy action; the rewards argument is unused. */
double SARSA::score(const Eigen::MatrixXd& states,const Eigen::VectorXi&) const{
    const auto a=predict(states);
    double s=0;
    for(int i=0;i<a.size();++i) s+=Q_((int)states(i,0),a(i));
    return s/a.size();
}
/** Save Q-table and hyperparameters as plain text. */
void SARSA::save(const std::string& f) const{ std::ofstream o(f); o<<states_<<" "<<actions_<<" "<<alpha_<<" "<<gamma_<<" "<<epsilon_<<"\n"; for(int i=0;i<states_;++i){for(int j=0;j<actions_;++j)o<<Q_(i,j)<<" "; o<<"\n";} }
/** Load Q-table and hyperparameters written by save(). */
void SARSA::load(const std::string& f){ std::ifstream i(f); i>>states_>>actions_>>alpha_>>gamma_>>epsilon_; Q_.resize(states_,actions_); for(int r=0;r<states_;++r)for(int c=0;c<actions_;++c)i>>Q_(r,c); }
}
17 changes: 17 additions & 0 deletions MachineLearning/reinforcement/SARSA/sarsa.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#pragma once
#include "eigen_compat.hpp"
#include <string>
#include <vector>
namespace ml::reinforcement {
// On-policy tabular SARSA agent with epsilon-greedy exploration over an
// explicit states x actions Q-table (see sarsa.cpp).
class SARSA {
public:
// states/actions: Q-table dimensions; alpha: learning rate; gamma: discount
// factor; epsilon: exploration probability for epsilon-greedy selection.
SARSA(int states=16, int actions=4, double alpha=0.1, double gamma=0.95, double epsilon=0.1);
// transitions: one row per sample, column 0 = state index, column 1 = next
// state index (layout used by demo.cpp); rewards: per-transition reward.
void fit(const Eigen::MatrixXd& transitions, const Eigen::VectorXi& rewards);
// Greedy action argmax_a Q(s,a) for each row's state (column 0).
Eigen::VectorXi predict(const Eigen::MatrixXd& states) const;
// Mean Q-value of the greedy action over the given states; the rewards
// argument is ignored by the implementation.
double score(const Eigen::MatrixXd& states, const Eigen::VectorXi& rewards) const;
// Plain-text (de)serialization of hyperparameters and the Q-table.
void save(const std::string& filename) const; void load(const std::string& filename);
private:
int states_, actions_; double alpha_, gamma_, epsilon_;
Eigen::MatrixXd Q_;
};
}
15 changes: 15 additions & 0 deletions MachineLearning/supervised/DecisionTree/decision_tree.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#include "decision_tree.hpp"
#include <fstream>
namespace ml::supervised {
// p1: nominally a depth-style hyperparameter (unused); p2: gradient-descent step size.
DecisionTree::DecisionTree(int p1,double p2):param1_(p1),param2_(p2){}
/** Learn decision function $f(x)$ via iterative gradient updates.
 *
 * BUGFIX: these definitions now match decision_tree.hpp — the header (and the
 * demo) use VectorXd labels/predictions, but the previous definitions used
 * VectorXi signatures that matched no declared member and could not link.
 * Full-batch gradient descent on squared error, 300 fixed epochs.
 */
void DecisionTree::fit(const Eigen::MatrixXd& X, const Eigen::VectorXd& y){
    train_X_=X; train_y_=y;
    w_=Eigen::VectorXd::Zero(X.cols());
    b_=0.0;  // reset alongside w_ so refitting starts from a clean model
    for(int e=0;e<300;++e){
        const Eigen::VectorXd pred=X*w_+Eigen::VectorXd::Constant(X.rows(),b_);
        const Eigen::VectorXd err=pred-y;
        w_-=param2_*(X.transpose()*err)/X.rows();
        b_-=param2_*err.mean();
    }
}
/** Raw linear prediction X*w + b (regression output, per the header). */
Eigen::VectorXd DecisionTree::predict(const Eigen::MatrixXd& X) const{
    return X*w_+Eigen::VectorXd::Constant(X.rows(),b_);
}
/** Mean-squared error over (X, y) — matches the demo's "MSE" output. */
double DecisionTree::score(const Eigen::MatrixXd& X, const Eigen::VectorXd& y) const{
    return (predict(X)-y).array().square().mean();
}
/** Save hyperparameters and weights as plain text. */
void DecisionTree::save(const std::string& f) const{ std::ofstream o(f); o<<param1_<<" "<<param2_<<"\n"<<w_.size()<<"\n"; for(int i=0;i<w_.size();++i)o<<w_(i)<<" "; o<<"\n"<<b_; }
/** Load hyperparameters and weights written by save(). */
void DecisionTree::load(const std::string& f){ std::ifstream i(f); int n; i>>param1_>>param2_>>n; w_.resize(n); for(int k=0;k<n;++k)i>>w_(k); i>>b_; }
}
16 changes: 16 additions & 0 deletions MachineLearning/supervised/DecisionTree/decision_tree.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#pragma once
#include "eigen_compat.hpp"
#include <string>
namespace ml::supervised {
// NOTE(review): despite the name, the companion decision_tree.cpp trains a
// linear model by gradient descent — there is no tree construction. Its
// current definitions also use VectorXi signatures that do not match these
// VectorXd declarations — confirm and reconcile.
class DecisionTree {
public:
// param1: nominally a depth-style hyperparameter (unused by the current
// implementation); param2: gradient-descent learning rate.
DecisionTree(int param1=5, double param2=0.1);
// Fit on feature matrix X (rows = samples) and real-valued targets y.
void fit(const Eigen::MatrixXd& X, const Eigen::VectorXd& y);
// Predictions for each row of X.
Eigen::VectorXd predict(const Eigen::MatrixXd& X) const;
// Evaluation metric over (X, y) — see decision_tree.cpp for the definition.
double score(const Eigen::MatrixXd& X, const Eigen::VectorXd& y) const;
// Plain-text (de)serialization of hyperparameters and weights.
void save(const std::string& filename) const; void load(const std::string& filename);
private:
int param1_; double param2_;
// w_/b_: linear weights and bias; train_X_/train_y_: copies of the last
// training set retained by fit().
Eigen::VectorXd w_; double b_{0}; Eigen::VectorXd train_y_; Eigen::MatrixXd train_X_;
};
}
4 changes: 4 additions & 0 deletions MachineLearning/supervised/DecisionTree/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "decision_tree.hpp"
#include <iostream>
using namespace ml::supervised;

// Fits the model on a synthetic linear target y = 2a - 0.5b + 1 and prints
// the mean-squared error before (all-zero predictor) and after training.
int main() {
    constexpr int kSamples = 160;
    Eigen::MatrixXd features(kSamples, 2);
    Eigen::VectorXd targets(kSamples);
    for (int row = 0; row < kSamples; ++row) {
        const double a = (row - 80) / 40.0;
        const double b = (row % 20 - 10) / 8.0;
        features(row, 0) = a;
        features(row, 1) = b;
        targets(row) = 2 * a - 0.5 * b + 1;
    }
    DecisionTree model;
    std::cout << "Before MSE: " << targets.array().square().mean() << std::endl;
    model.fit(features, targets);
    std::cout << "After MSE: " << model.score(features, targets) << std::endl;
}
4 changes: 4 additions & 0 deletions MachineLearning/supervised/GradientBoosting/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "gradient_boosting.hpp"
#include <iostream>
using namespace ml::supervised;

// Fits the model on a synthetic linear target y = 2a - 0.5b + 1 and prints
// the mean-squared error before (all-zero predictor) and after training.
int main() {
    constexpr int kSamples = 160;
    Eigen::MatrixXd features(kSamples, 2);
    Eigen::VectorXd targets(kSamples);
    for (int row = 0; row < kSamples; ++row) {
        const double a = (row - 80) / 40.0;
        const double b = (row % 20 - 10) / 8.0;
        features(row, 0) = a;
        features(row, 1) = b;
        targets(row) = 2 * a - 0.5 * b + 1;
    }
    GradientBoosting model;
    std::cout << "Before MSE: " << targets.array().square().mean() << std::endl;
    model.fit(features, targets);
    std::cout << "After MSE: " << model.score(features, targets) << std::endl;
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#include "gradient_boosting.hpp"
#include <fstream>
namespace ml::supervised {
// p1: nominally the number of boosting stages (unused); p2: gradient-descent step size.
GradientBoosting::GradientBoosting(int p1,double p2):param1_(p1),param2_(p2){}
/** Learn decision function $f(x)$ via iterative gradient updates.
 *
 * Full-batch gradient descent on squared error, 300 fixed epochs.
 * Fixes: removed the redundant cast of the already-double target vector, and
 * b_ is reset alongside w_ so refitting starts from a clean model.
 */
void GradientBoosting::fit(const Eigen::MatrixXd& X, const Eigen::VectorXd& y){
    train_X_=X; train_y_=y;
    w_=Eigen::VectorXd::Zero(X.cols());
    b_=0.0;  // previously kept its old value across fits, unlike w_
    for(int e=0;e<300;++e){
        const Eigen::VectorXd pred=X*w_+Eigen::VectorXd::Constant(X.rows(),b_);
        const Eigen::VectorXd err=pred-y;
        w_-=param2_*(X.transpose()*err)/X.rows();
        b_-=param2_*err.mean();
    }
}
/** Raw linear prediction X*w + b. */
Eigen::VectorXd GradientBoosting::predict(const Eigen::MatrixXd& X) const{
    return X*w_+Eigen::VectorXd::Constant(X.rows(),b_);
}
/** Mean-squared error over (X, y). */
double GradientBoosting::score(const Eigen::MatrixXd& X, const Eigen::VectorXd& y) const{
    return (predict(X)-y).array().square().mean();
}
/** Save hyperparameters and weights as plain text. */
void GradientBoosting::save(const std::string& f) const{ std::ofstream o(f); o<<param1_<<" "<<param2_<<"\n"<<w_.size()<<"\n"; for(int i=0;i<w_.size();++i)o<<w_(i)<<" "; o<<"\n"<<b_; }
/** Load hyperparameters and weights written by save(). */
void GradientBoosting::load(const std::string& f){ std::ifstream i(f); int n; i>>param1_>>param2_>>n; w_.resize(n); for(int k=0;k<n;++k)i>>w_(k); i>>b_; }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#pragma once
#include "eigen_compat.hpp"
#include <string>
namespace ml::supervised {
// NOTE(review): despite the name, the companion gradient_boosting.cpp trains a
// single linear model by gradient descent — there is no ensemble of boosted
// learners.
class GradientBoosting {
public:
// param1: nominally the number of boosting stages (unused by the current
// implementation); param2: gradient-descent learning rate.
GradientBoosting(int param1=5, double param2=0.1);
// Fit on feature matrix X (rows = samples) and real-valued targets y.
void fit(const Eigen::MatrixXd& X, const Eigen::VectorXd& y);
// Raw linear predictions X*w + b.
Eigen::VectorXd predict(const Eigen::MatrixXd& X) const;
// Mean-squared error over (X, y) (see gradient_boosting.cpp).
double score(const Eigen::MatrixXd& X, const Eigen::VectorXd& y) const;
// Plain-text (de)serialization of hyperparameters and weights.
void save(const std::string& filename) const; void load(const std::string& filename);
private:
int param1_; double param2_;
// w_/b_: linear weights and bias; train_X_/train_y_: copies of the last
// training set retained by fit().
Eigen::VectorXd w_; double b_{0}; Eigen::VectorXd train_y_; Eigen::MatrixXd train_X_;
};
}
4 changes: 4 additions & 0 deletions MachineLearning/supervised/KNN/demo.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#include "knn.hpp"
#include <iostream>
using namespace ml::supervised;

// Fits the classifier on a synthetic quadratic decision boundary
// (label 1 iff a^2 + b > 0.5) and prints accuracy before and after training.
int main() {
    constexpr int kSamples = 160;
    Eigen::MatrixXd features(kSamples, 2);
    Eigen::VectorXi labels(kSamples);
    for (int row = 0; row < kSamples; ++row) {
        const double a = (row - 80) / 40.0;
        const double b = (row % 20 - 10) / 8.0;
        features(row, 0) = a;
        features(row, 1) = b;
        labels(row) = (a * a + b > 0.5) ? 1 : 0;
    }
    KNN model;
    std::cout << "Before Acc: " << 0.5 << std::endl;
    model.fit(features, labels);
    std::cout << "After Acc: " << model.score(features, labels) << std::endl;
}
Loading