|
from functools import partial

import matplotlib.pyplot as plt
import numpy as np
from modAL.models import BayesianOptimizer
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
|
# Defining the kernel for the Gaussian process surrogate model.
kernel = Matern(length_scale=1.0)

# Exploration/exploitation tradeoff for probability of improvement (PI).
tr = 0.1
# Bind the tradeoff into the PI acquisition and its query strategy so they
# can be used interchangeably with the parameter-free EI/UCB pairs below.
PI_tr = partial(PI, tradeoff=tr)
# partial objects have no __name__; set one so the plotting loop can use
# acquisition.__name__ as an axis label for every acquisition uniformly.
PI_tr.__name__ = 'PI, tradeoff = %1.1f' % tr
max_PI_tr = partial(max_PI, tradeoff=tr)

# Pair each acquisition function (for plotting utility values) with its
# matching query strategy (for selecting the next point to evaluate).
# NOTE: zip yields a one-shot iterator — it can be consumed only once.
acquisitions = zip(
    [PI_tr, EI, UCB],
    [max_PI_tr, max_EI, max_UCB],
)
25 | 29 |
|
# Run five rounds of Bayesian optimization for each acquisition/query-strategy
# pair, plotting the surrogate's prediction (top row) and the acquisition's
# utility over X (bottom row) after every query.
for acquisition, query_strategy in acquisitions:

    # Initializing a fresh optimizer for this acquisition so each pair starts
    # from the same initial training data.
    optimizer = BayesianOptimizer(
        estimator=GaussianProcessRegressor(kernel=kernel),
        X_training=X_initial, y_training=y_initial,
        query_strategy=query_strategy
    )

    # Plotting the estimation as it evolves over five queries.
    with plt.style.context('seaborn-white'):
        plt.figure(figsize=(30, 6))
        for n_query in range(5):
            # Top row: current GP prediction with a +/- 1 std band.
            plt.subplot(2, 5, n_query + 1)
            plt.title('Query no. %d' % (n_query + 1))
            if n_query == 0:
                plt.ylabel('Predictions')
            plt.ylim([-1.5, 3])
            pred, std = optimizer.predict(X.reshape(-1, 1), return_std=True)
            utility_score = acquisition(optimizer, X)
            plt.plot(X, pred)
            plt.fill_between(X.reshape(-1, ), pred.reshape(-1, ) - std, pred.reshape(-1, ) + std, alpha=0.2)
            # Ground-truth function for reference.
            plt.plot(X, y, c='k', linewidth=3)
            # Acquired points so far; the most recent one is drawn in white on top.
            plt.scatter(optimizer.X_training[-1], optimizer.y_training[-1], c='w', s=40, zorder=20)
            plt.scatter(optimizer.X_training, optimizer.y_training, c='k', s=80, zorder=1)

            # Bottom row: the acquisition's utility (scaled x5 for visibility).
            plt.subplot(2, 5, 5 + n_query + 1)
            if n_query == 0:
                plt.ylabel(acquisition.__name__)
            plt.plot(X, 5 * utility_score, c='r')

            # Query the next point and teach it to the optimizer.
            query_idx, query_inst = optimizer.query(X)
            optimizer.teach(X[query_idx].reshape(1, -1), y[query_idx].reshape(1, -1))
    plt.show()
0 commit comments