|
| 1 | +"""Experiments page code snippets for documentation. |
| 2 | +
|
| 3 | +This snippet file contains examples from the experiments.rst page covering |
| 4 | +custom objectives, built-in experiments, and benchmarks. |
| 5 | +""" |
| 6 | + |
| 7 | +import numpy as np |
| 8 | + |
# [start:simple_objective]
def objective(params):
    """Score a candidate point; Hyperactive MAXIMIZES the returned value.

    The negated paraboloid -(x^2 + y^2) has its maximum, 0, at the origin.
    """
    x, y = params["x"], params["y"]
    return -(x**2 + y**2)
# [end:simple_objective]
| 16 | + |
| 17 | + |
| 18 | +# [start:ackley_function] |
| 19 | +import numpy as np |
| 20 | +from hyperactive.opt.gfo import BayesianOptimizer |
| 21 | + |
# Ackley function (a common benchmark)
def ackley(params):
    """Return the negated 2-D Ackley value at (params["x"], params["y"]).

    The Ackley function has its global minimum of 0 at the origin; the
    value is negated so that maximizing the score minimizes the function.
    """
    x, y = params["x"], params["y"]

    exp_term = -20 * np.exp(-0.2 * np.sqrt(0.5 * (x**2 + y**2)))
    cos_term = -np.exp(0.5 * (np.cos(2 * np.pi * x) + np.cos(2 * np.pi * y)))
    value = exp_term + cos_term + np.e + 20

    # Negate to maximize (minimize the Ackley function)
    return -value
| 32 | + |
search_space = {
    # 100 evenly spaced candidate values per axis over [-5, 5].
    "x": np.linspace(-5, 5, 100),
    "y": np.linspace(-5, 5, 100),
}

# A plain callable is passed directly as the experiment here.
optimizer = BayesianOptimizer(
    search_space=search_space,
    n_iter=50,  # number of objective evaluations
    experiment=ackley,
)
best_params = optimizer.solve()
# [end:ackley_function]
| 45 | + |
| 46 | + |
| 47 | +# [start:external_simulation] |
| 48 | +import subprocess |
| 49 | + |
def run_simulation(params):
    """Run the external simulator for *params* and return its scalar score.

    The simulator binary is expected to print the score on stdout;
    that output is parsed as a float.
    """
    cmd = [
        "./my_simulation",
        str(params["param1"]),
        str(params["param2"]),
    ]
    completed = subprocess.run(cmd, capture_output=True, text=True)
    return float(completed.stdout.strip())
# [end:external_simulation]
| 61 | + |
| 62 | + |
| 63 | +# [start:sklearn_cv_experiment] |
| 64 | +from sklearn.ensemble import RandomForestClassifier |
| 65 | +from sklearn.datasets import load_iris |
| 66 | +from sklearn.model_selection import KFold |
| 67 | +from sklearn.metrics import accuracy_score |
| 68 | +from hyperactive.experiment.integrations import SklearnCvExperiment |
| 69 | +from hyperactive.opt.gfo import HillClimbing |
| 70 | + |
# Cross-validated tuning of a random forest on the iris dataset.
X, y = load_iris(return_X_y=True)

experiment = SklearnCvExperiment(
    estimator=RandomForestClassifier(random_state=42),
    X=X,
    y=y,
    cv=KFold(n_splits=5, shuffle=True, random_state=42),
    scoring=accuracy_score,  # Optional: defaults to estimator's score method
)

# Discrete candidate grids — one list of values per hyperparameter.
search_space = {
    "n_estimators": list(range(10, 200, 10)),
    "max_depth": list(range(1, 20)),
    "min_samples_split": list(range(2, 10)),
}

optimizer = HillClimbing(
    search_space=search_space,
    n_iter=30,  # number of parameter sets evaluated
    experiment=experiment,
)
best_params = optimizer.solve()
# [end:sklearn_cv_experiment]
| 94 | + |
| 95 | + |
| 96 | +# [start:sktime_forecasting] |
| 97 | +from sktime.forecasting.naive import NaiveForecaster |
| 98 | +from sktime.datasets import load_airline |
| 99 | +from hyperactive.experiment.integrations import SktimeForecastingExperiment |
| 100 | +from hyperactive.opt.gfo import RandomSearch |
| 101 | + |
# Tune a naive forecaster on the airline dataset.
y = load_airline()

experiment = SktimeForecastingExperiment(
    estimator=NaiveForecaster(),
    y=y,
    fh=[1, 2, 3],  # Forecast horizon
)

search_space = {
    # Candidate strategies for NaiveForecaster.
    "strategy": ["mean", "last", "drift"],
}

optimizer = RandomSearch(
    search_space=search_space,
    n_iter=10,
    experiment=experiment,
)
best_params = optimizer.solve()
# [end:sktime_forecasting]
| 121 | + |
| 122 | + |
| 123 | +# [start:torch_experiment] |
| 124 | +from hyperactive.experiment.integrations import TorchExperiment |
| 125 | + |
# NOTE(review): MyLightningModel and my_datamodule are illustrative
# placeholders the user is expected to define — this snippet is not
# runnable as-is.
experiment = TorchExperiment(
    model_class=MyLightningModel,
    datamodule=my_datamodule,
    trainer_kwargs={"max_epochs": 10},  # presumably forwarded to the Lightning Trainer — TODO confirm
)
# [end:torch_experiment]
| 132 | + |
| 133 | + |
| 134 | +# [start:benchmark_experiments] |
| 135 | +from hyperactive.experiment.bench import Ackley, Sphere, Parabola |
| 136 | + |
# Use benchmark as experiment
# NOTE: this rebinds the module-level name `ackley` from the function above.
ackley = Ackley(dim=2)

# NOTE(review): relies on the BayesianOptimizer import from an earlier
# snippet in this file.
optimizer = BayesianOptimizer(
    search_space=ackley.search_space,  # benchmark instances carry their own search space
    n_iter=50,
    experiment=ackley,
)
# [end:benchmark_experiments]
| 146 | + |
| 147 | + |
| 148 | +# [start:score_method] |
| 149 | +from hyperactive.experiment.integrations import SklearnCvExperiment |
| 150 | + |
# NOTE(review): RandomForestClassifier, X and y come from the sklearn
# snippet earlier in this file.
experiment = SklearnCvExperiment(
    estimator=RandomForestClassifier(),
    X=X, y=y, cv=5,
)

# Evaluate specific parameters
params = {"n_estimators": 100, "max_depth": 10}
# score() returns a (score, additional_info) pair, as unpacked here.
score, additional_info = experiment.score(params)

print(f"Score: {score}")
print(f"Additional info: {additional_info}")
# [end:score_method]
| 163 | + |
| 164 | + |
# [start:robust_objective]
def robust_objective(params):
    """Wrap a scoring function so that failures yield a very bad score.

    Any exception raised by ``compute_score`` is mapped to ``-np.inf``,
    so the optimizer steers away from the failing region instead of
    crashing the whole search.
    """
    try:
        result = compute_score(params)
    except Exception:
        return -np.inf  # Return bad score on failure
    return result
# [end:robust_objective]
| 173 | + |
| 174 | + |
# --- Runnable test code below ---
if __name__ == "__main__":
    # The simple quadratic objective attains its maximum, 0.0, at the origin.
    params = {"x": 0.0, "y": 0.0}
    score = objective(params)
    assert score == 0.0, f"Expected 0.0, got {score}"

    # The negated Ackley function is ~0 at its global optimum (0, 0).
    params = {"x": 0.0, "y": 0.0}
    ackley_score = ackley(params)
    assert abs(ackley_score) < 0.01, f"Expected ~0, got {ackley_score}"

    # Smoke-test the sklearn CV experiment end to end with a tiny search.
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.datasets import load_iris
    from hyperactive.experiment.integrations import SklearnCvExperiment
    from hyperactive.opt.gfo import HillClimbing

    X, y = load_iris(return_X_y=True)
    experiment = SklearnCvExperiment(
        estimator=RandomForestClassifier(random_state=42), X=X, y=y, cv=3
    )

    search_space = {"n_estimators": [10, 50, 100], "max_depth": [3, 5, 10]}

    optimizer = HillClimbing(
        search_space=search_space,
        n_iter=5,
        experiment=experiment,
        random_state=42,  # deterministic smoke test
    )
    best_params = optimizer.solve()
    assert "n_estimators" in best_params
    assert "max_depth" in best_params

    print("Experiments snippets passed!")
0 commit comments