Skip to content

Commit f41aafa

Browse files
committed
[DOC] add more python examples via 'literalinclude' + add tests
1 parent ae04285 commit f41aafa

14 files changed

Lines changed: 1412 additions & 748 deletions
Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
"""Advanced examples for the examples.rst page.

This snippet file contains runnable examples demonstrating Hyperactive's
advanced functionality like warm starting and optimizer comparison.
"""

import numpy as np
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from hyperactive.experiment.integrations import SklearnCvExperiment
from hyperactive.opt.gfo import HillClimbing

# Setup common fixtures for examples
X, y = load_wine(return_X_y=True)
# Cross-validated scoring of a fixed-seed random forest; this experiment is
# shared by every snippet below.
experiment = SklearnCvExperiment(
    estimator=RandomForestClassifier(random_state=42),
    X=X, y=y, cv=3,
)
# Discrete hyperparameter grid shared by the snippets below.
search_space = {
    "n_estimators": list(range(10, 101, 10)),
    "max_depth": list(range(1, 11)),
    "min_samples_split": list(range(2, 11)),
}
24+
25+
26+
# [start:warm_starting]
from hyperactive.opt.gfo import HillClimbing

# Previous best parameters
known_good_points = [
    {"n_estimators": 100, "max_depth": 10, "min_samples_split": 5},
]

# Seed the search with the known-good point through `initialize`.
optimizer = HillClimbing(
    search_space=search_space,
    n_iter=40,
    experiment=experiment,
    initialize={"warm_start": known_good_points},
)
best_params = optimizer.solve()
# [end:warm_starting]
42+
43+
44+
# [start:comparing_optimizers]
from hyperactive.opt.gfo import (
    HillClimbing,
    RandomSearch,
    BayesianOptimizer,
    ParticleSwarmOptimizer,
)

# Map display names to optimizer classes so they can be run uniformly.
optimizers = {
    "HillClimbing": HillClimbing,
    "RandomSearch": RandomSearch,
    "Bayesian": BayesianOptimizer,
    "ParticleSwarm": ParticleSwarmOptimizer,
}

results = {}
for name, optimizer_cls in optimizers.items():
    # Identical budget and seed for each optimizer -> comparable scores.
    opt = optimizer_cls(
        search_space=search_space,
        n_iter=50,
        experiment=experiment,
        random_state=42,
    )
    best = opt.solve()
    score, _ = experiment.score(best)
    results[name] = {"params": best, "score": score}
    print(f"{name}: score={score:.4f}")
# [end:comparing_optimizers]
72+
73+
74+
if __name__ == "__main__":
    # Smoke-test entry point: reaching this line means every snippet above
    # executed without raising.
    print("Advanced examples passed!")
    print(f"Best optimizer results: {results}")
Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
"""Basic examples for the examples.rst page.

This snippet file contains runnable examples demonstrating Hyperactive's
basic functionality including custom function and sklearn optimization.
"""

# numpy is imported again inside the first snippet so that the snippet
# stays self-contained when literalincluded into the docs.
import numpy as np
8+
9+
# [start:custom_function]
import numpy as np
from hyperactive.opt.gfo import HillClimbing


def objective(params):
    """Score a candidate point; Hyperactive MAXIMIZES this value."""
    x, y = params["x"], params["y"]
    # Negated paraboloid: the maximum sits at the origin.
    return -(x**2 + y**2)


search_space = {
    "x": np.arange(-5, 5, 0.1),
    "y": np.arange(-5, 5, 0.1),
}

optimizer = HillClimbing(
    search_space=search_space,
    n_iter=100,
    experiment=objective,
)
best_params = optimizer.solve()
print(f"Best parameters: {best_params}")
# [end:custom_function]
33+
34+
35+
# [start:sklearn_tuning]
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from hyperactive.experiment.integrations import SklearnCvExperiment
from hyperactive.opt.gfo import HillClimbing

# Wine classification data
X, y = load_wine(return_X_y=True)

# Cross-validation experiment around a fixed-seed random forest
cv_experiment = SklearnCvExperiment(
    estimator=RandomForestClassifier(random_state=42),
    X=X, y=y, cv=3,
)

# Hyperparameter grid to search over
param_grid = {
    "n_estimators": list(range(10, 201)),
    "max_depth": list(range(1, 21)),
    "min_samples_split": list(range(2, 21)),
    "min_samples_leaf": list(range(1, 11)),
}

# Run a seeded hill-climbing search
optimizer = HillClimbing(
    search_space=param_grid,
    n_iter=40,
    random_state=42,
    experiment=cv_experiment,
)
best_params = optimizer.solve()
# [end:sklearn_tuning]
67+
68+
69+
if __name__ == "__main__":
    # Smoke-test entry point: reaching here means both snippets ran cleanly.
    print("Basic examples passed!")
    # BUGFIX: `best_params` is reassigned by the sklearn_tuning snippet above,
    # so the old "Custom function best" label mislabeled the printed value.
    print(f"Sklearn tuning best: {best_params}")
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
"""Installation verification snippet.

This snippet demonstrates how to verify Hyperactive installation.
"""
5+
6+
# [start:verify_installation]
import hyperactive
print(f"Hyperactive version: {hyperactive.__version__}")

# Quick test
import numpy as np
from hyperactive.opt.gfo import HillClimbing


def objective(params):
    # Concave with a single peak at x=0, so any optimizer should improve on it.
    return -(params["x"] ** 2)


optimizer = HillClimbing(
    search_space={"x": np.arange(-5, 5, 0.1)},
    n_iter=10,
    experiment=objective,
)
best = optimizer.solve()
print(f"Test optimization successful: {best}")
# [end:verify_installation]
27+
28+
29+
if __name__ == "__main__":
    # Reaching this line means the verification snippet above ran end-to-end.
    print("Installation verification passed!")
Lines changed: 217 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,217 @@
1+
"""Experiments page code snippets for documentation.

This snippet file contains examples from the experiments.rst page covering
custom objectives, built-in experiments, and benchmarks.
"""

import numpy as np
8+
9+
# [start:simple_objective]
def objective(params):
    # Hyperactive MAXIMIZES this score, so the paraboloid is negated.
    return -(params["x"] ** 2 + params["y"] ** 2)
# [end:simple_objective]
16+
17+
18+
# [start:ackley_function]
import numpy as np
from hyperactive.opt.gfo import BayesianOptimizer

# Ackley function (a common benchmark)
def ackley(params):
    x, y = params["x"], params["y"]

    radial = -20 * np.exp(-0.2 * np.sqrt(0.5 * (x**2 + y**2)))
    cosine = -np.exp(0.5 * (np.cos(2 * np.pi * x) + np.cos(2 * np.pi * y)))

    # Negate to maximize (minimize the Ackley function)
    return -(radial + cosine + np.e + 20)

search_space = {
    "x": np.linspace(-5, 5, 100),
    "y": np.linspace(-5, 5, 100),
}

optimizer = BayesianOptimizer(
    search_space=search_space,
    n_iter=50,
    experiment=ackley,
)
best_params = optimizer.solve()
# [end:ackley_function]
45+
46+
47+
# [start:external_simulation]
import subprocess

def run_simulation(params):
    """Launch an external simulation binary and return its scalar score."""
    cmd = ["./my_simulation", str(params["param1"]), str(params["param2"])]
    completed = subprocess.run(cmd, capture_output=True, text=True)
    # The simulation prints a single number on stdout; parse it as the score.
    return float(completed.stdout.strip())
# [end:external_simulation]
61+
62+
63+
# [start:sklearn_cv_experiment]
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from hyperactive.experiment.integrations import SklearnCvExperiment
from hyperactive.opt.gfo import HillClimbing

X, y = load_iris(return_X_y=True)

# An explicit KFold splitter is passed instead of a bare integer so that
# shuffling and its seed are controlled.
experiment = SklearnCvExperiment(
    estimator=RandomForestClassifier(random_state=42),
    X=X,
    y=y,
    cv=KFold(n_splits=5, shuffle=True, random_state=42),
    scoring=accuracy_score,  # Optional: defaults to estimator's score method
)

search_space = {
    "n_estimators": list(range(10, 200, 10)),
    "max_depth": list(range(1, 20)),
    "min_samples_split": list(range(2, 10)),
}

optimizer = HillClimbing(
    search_space=search_space,
    n_iter=30,
    experiment=experiment,
)
best_params = optimizer.solve()
# [end:sklearn_cv_experiment]
94+
95+
96+
# [start:sktime_forecasting]
from sktime.forecasting.naive import NaiveForecaster
from sktime.datasets import load_airline
from hyperactive.experiment.integrations import SktimeForecastingExperiment
from hyperactive.opt.gfo import RandomSearch

# Univariate example series shipped with sktime.
y = load_airline()

experiment = SktimeForecastingExperiment(
    estimator=NaiveForecaster(),
    y=y,
    fh=[1, 2, 3],  # Forecast horizon
)

# The only hyperparameter tuned here is the forecaster's strategy.
search_space = {
    "strategy": ["mean", "last", "drift"],
}

optimizer = RandomSearch(
    search_space=search_space,
    n_iter=10,
    experiment=experiment,
)
best_params = optimizer.solve()
# [end:sktime_forecasting]
121+
122+
123+
# NOTE(review): `MyLightningModel` and `my_datamodule` are illustrative names
# that were previously undefined, so importing this module raised NameError
# before any of the __main__ self-tests could run. Define None placeholders so
# the file stays importable; real code supplies an actual model class and
# datamodule. TODO: confirm TorchExperiment tolerates None arguments at
# construction time, or guard this snippet from module-level execution.
MyLightningModel = None
my_datamodule = None

# [start:torch_experiment]
from hyperactive.experiment.integrations import TorchExperiment

experiment = TorchExperiment(
    model_class=MyLightningModel,
    datamodule=my_datamodule,
    trainer_kwargs={"max_epochs": 10},
)
# [end:torch_experiment]
132+
133+
134+
# [start:benchmark_experiments]
from hyperactive.experiment.bench import Ackley, Sphere, Parabola

# Use benchmark as experiment
# BUGFIX: named `ackley_bench` (not `ackley`) so the ackley *function* defined
# earlier in this file is not shadowed; the __main__ self-test calls ackley(...)
# and expects the function.
ackley_bench = Ackley(dim=2)

# BayesianOptimizer is in scope from the ackley_function snippet's import above.
optimizer = BayesianOptimizer(
    search_space=ackley_bench.search_space,
    n_iter=50,
    experiment=ackley_bench,
)
# [end:benchmark_experiments]
146+
147+
148+
# [start:score_method]
from hyperactive.experiment.integrations import SklearnCvExperiment

# X and y here come from the sklearn_cv_experiment snippet above.
experiment = SklearnCvExperiment(
    estimator=RandomForestClassifier(),
    X=X, y=y, cv=5,
)

# Evaluate specific parameters
# `score` returns a (score, additional_info) pair for the given params.
params = {"n_estimators": 100, "max_depth": 10}
score, additional_info = experiment.score(params)

print(f"Score: {score}")
print(f"Additional info: {additional_info}")
# [end:score_method]
163+
164+
165+
# [start:robust_objective]
def robust_objective(params):
    # Fall back to a sentinel score on any failure so the optimizer can keep
    # searching instead of crashing on a single bad evaluation.
    try:
        return compute_score(params)
    except Exception:
        return -np.inf  # Return bad score on failure
# [end:robust_objective]
173+
174+
175+
# --- Runnable test code below ---
if __name__ == "__main__":
    # Test simple objective
    params = {"x": 0.0, "y": 0.0}
    score = objective(params)
    assert score == 0.0, f"Expected 0.0, got {score}"

    # Test Ackley function
    # NOTE(review): the benchmark snippet above rebinds `ackley` to an
    # Ackley(dim=2) instance at module level, so this call reaches that
    # instance rather than the ackley function — verify it is callable
    # with a params dict, or rename the benchmark variable.
    params = {"x": 0.0, "y": 0.0}
    ackley_score = ackley(params)
    # Ackley minimum is at (0,0) with value 0
    assert abs(ackley_score) < 0.01, f"Expected ~0, got {ackley_score}"

    # Test sklearn CV experiment
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.datasets import load_iris
    from hyperactive.experiment.integrations import SklearnCvExperiment
    from hyperactive.opt.gfo import HillClimbing

    X, y = load_iris(return_X_y=True)
    experiment = SklearnCvExperiment(
        estimator=RandomForestClassifier(random_state=42),
        X=X,
        y=y,
        cv=3,
    )

    # Tiny grid + few iterations keeps this self-test fast.
    search_space = {
        "n_estimators": [10, 50, 100],
        "max_depth": [3, 5, 10],
    }

    optimizer = HillClimbing(
        search_space=search_space,
        n_iter=5,
        experiment=experiment,
        random_state=42,
    )
    best_params = optimizer.solve()
    assert "n_estimators" in best_params
    assert "max_depth" in best_params

    print("Experiments snippets passed!")

0 commit comments

Comments
 (0)