@@ -1,7 +1,7 @@
# Integrations with AI framework toolboxes

This directory contains examples for estimator-level integration with
-common AI toolbox libraries such as `scikit-learn`, `sktime`, or `torch`.
+common AI toolbox libraries such as `scikit-learn` or `sktime`.

## Quick Start

@@ -12,10 +12,15 @@ You can also run any example directly:
python sklearn_classif_example.py
python sktime_forecasting_example.py
python sktime_tsc_example.py
-python torch_experiment_example.py
```

-Requires `scikit-learn` resp `sktime` resp `torch` and `lightning` installed.
+Requires `scikit-learn` or `sktime`, respectively, to be installed.
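+
+For example, both can be installed from PyPI:
+
+```
+pip install scikit-learn sktime
+```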

## Available Integrations

@@ -24,7 +29,6 @@ Requires `scikit-learn` resp `sktime` resp `torch` and `lightning` installed.
| `sklearn` classifier or regressor tuner | [OptCV](sklearn_classif_example.py) |
| `sktime` forecasting tuner | [ForecastingOptCV](sktime_forecasting_example.py) |
| `sktime` time series classifier tuner | [TSCOptCV](sktime_tsc_example.py) |
-| `torch` experiment tuner | [TorchExperiment](torch_experiment_example.py) |

## Integration with sklearn

@@ -145,100 +149,3 @@ y_pred = tuned_naive.predict(X_test)
best_params = tuned_naive.best_params_
best_classifier = tuned_naive.best_estimator_
```
-
-## Integration with torch
-
-Any available tuning engine from hyperactive can be used, for example:
-
-* hill climbing - `from hyperactive.opt import HillClimbing`
-* Bayesian optimization - `from hyperactive.opt.gfo import BayesianOptimizer`
-* Optuna tree-structured Parzen estimator (TPE) search - `from hyperactive.opt.optuna import TPEOptimizer`
-
-For illustration, we use hill climbing; this can be replaced by any other optimizer.
-
-```python
-# 1. defining the experiment:
-import numpy as np
-import lightning as L
-import torch
-from torch import nn
-from torch.utils.data import DataLoader
-
-from hyperactive.experiment.integrations import TorchExperiment
-from hyperactive.opt.gfo import HillClimbing
-
-class SimpleLightningModule(L.LightningModule):
-    def __init__(self, input_dim=10, hidden_dim=16, lr=1e-3):
-        super().__init__()
-        self.save_hyperparameters()
-        self.model = nn.Sequential(
-            nn.Linear(input_dim, hidden_dim), nn.ReLU(),
-            nn.Linear(hidden_dim, 2),
-        )
-        self.lr = lr
-
-    def forward(self, x):
-        return self.model(x)
-
-    def training_step(self, batch, batch_idx):
-        x, y = batch
-        loss = nn.functional.cross_entropy(self(x), y)
-        self.log("train_loss", loss)
-        return loss
-
-    def validation_step(self, batch, batch_idx):
-        x, y = batch
-        val_loss = nn.functional.cross_entropy(self(x), y)
-        self.log("val_loss", val_loss, on_epoch=True)
-        return val_loss
-
-    def configure_optimizers(self):
-        return torch.optim.Adam(self.parameters(), lr=self.lr)
-
-class RandomDataModule(L.LightningDataModule):
-    def __init__(self, batch_size=32):
-        super().__init__()
-        self.batch_size = batch_size
-
-    def setup(self, stage=None):
-        dataset = torch.utils.data.TensorDataset(
-            torch.randn(200, 10), torch.randint(0, 2, (200,)),
-        )
-        self.train, self.val = torch.utils.data.random_split(
-            dataset, [160, 40]
-        )
-
-    def train_dataloader(self):
-        return DataLoader(self.train, batch_size=self.batch_size)
-
-    def val_dataloader(self):
-        return DataLoader(self.val, batch_size=self.batch_size)
-
-# 2. creating the experiment:
-datamodule = RandomDataModule(batch_size=16)
-datamodule.setup()
-
-experiment = TorchExperiment(
-    datamodule=datamodule,
-    lightning_module=SimpleLightningModule,
-    trainer_kwargs={"max_epochs": 3},
-    objective_metric="val_loss",
-)
-
-# 3. defining search space and running optimization:
-search_space = {
-    "hidden_dim": [16, 32, 64, 128],
-    "lr": np.logspace(-4, -1, 10).tolist(),
-}
-
-optimizer = HillClimbing(
-    search_space=search_space,
-    n_iter=5,
-    experiment=experiment,
-)
-best_params = optimizer.solve()
-
-# 4. obtaining best parameters
-print(f"Best params: {best_params}")
-```
-