@@ -34,16 +34,19 @@ class Acquisition(Parameterized):
3434 In Bayesian Optimization this function is typically optimized over the optimization domain
3535 to determine the next point for evaluation.
3636
37- An object of this class holds a list of GPflow models. For single objective optimization this is typically a
38- single model. Subclasses implement a build_acquisition function which computes the acquisition function (usually
39- from the predictive distribution) using TensorFlow. Each model is automatically optimized when an acquisition object
40- is constructed or when set_data is called.
37+ An object of this class holds a list of GPflow models. Subclasses implement a build_acquisition function
38+ which computes the acquisition function (usually from the predictive distribution) using TensorFlow.
39+ Each model is automatically optimized when an acquisition object is constructed or when set_data is called.
4140
42- Acquisition functions can be combined through addition or multiplication to construct joint criteria
43- (for instance for constrained optimization)
41+ Acquisition functions can be combined through addition or multiplication to construct joint criteria,
42+ for instance for constrained optimization.
4443 """
4544
4645 def __init__ (self , models = [], optimize_restarts = 5 ):
46+ """
47+ :param models: list of GPflow models representing our beliefs about the problem
48+ :param optimize_restarts: number of optimization restarts to use when training the models
49+ """
4750 super (Acquisition , self ).__init__ ()
4851 self ._models = ParamList ([DataScaler (m ) for m in np .atleast_1d (models ).tolist ()])
4952 self ._default_params = list (map (lambda m : m .get_free_state (), self ._models ))
@@ -56,10 +59,11 @@ def _optimize_models(self):
5659 """
5760 Optimizes the hyperparameters of all models that the acquisition function is based on.
5861
59- It is called after initialization and set_data(), and before optimizing the acquisition function itself.
62+ It is called automatically during initialization and each time set_data() is called.
63+ When using the high-level :class:`..BayesianOptimizer` class, calling set_data() is taken care of.
6064
6165 For each model the hyperparameters of the model at the time it was passed to __init__() are used as initial
62- point and optimized. If optimize_restarts was configured to values larger than one additional randomization
66+ point and optimized. If optimize_restarts is set to >1, additional randomization
6367 steps are performed.
6468
6569 As a special case, if optimize_restarts is set to zero, the hyperparameters of the models are not optimized.
@@ -82,14 +86,15 @@ def _optimize_models(self):
8286 best_idx = np .argmin ([r .fun for r in runs ])
8387 model .set_state (runs [best_idx ].x )
8488
85- def build_acquisition (self ):
89+ def build_acquisition (self , Xcand ):
8690 raise NotImplementedError
8791
8892 def enable_scaling (self , domain ):
8993 """
9094 Enables and configures the :class:`.DataScaler` objects wrapping the GP models.
95+
9196 :param domain: :class:`.Domain` object, the input transform of the data scalers is configured as a transform
92- from domain to the unit cube with the same dimensionality.
97+ from domain to the unit cube with the same dimensionality.
9398 """
9499 n_inputs = self .data [0 ].shape [1 ]
95100 assert (domain .size == n_inputs )
@@ -103,11 +108,11 @@ def set_data(self, X, Y):
103108 Update the training data of the contained models. Automatically triggers a hyperparameter optimization
104109 step by calling _optimize_all() and an update of pre-computed quantities by calling setup().
105110
106- Consider Q to be the the sum of the output dimensions of the contained models, Y should have a minimum of
111+ Let Q be the the sum of the output dimensions of all contained models, Y should have a minimum of
107112 Q columns. Only the first Q columns of Y are used while returning the scalar Q
108113
109114 :param X: input data N x D
110- :param Y: Responses N x M (M >= Q)
115+ :param Y: output data N x R (R >= Q)
111116 :return: Q (sum of output dimensions of contained models)
112117 """
113118 num_outputs_sum = 0
@@ -120,23 +125,30 @@ def set_data(self, X, Y):
120125 model .Y = Ypart
121126
122127 self ._optimize_models ()
128+
129+ # Only call setup for the high-level acquisition function
123130 if self .highest_parent == self :
124131 self .setup ()
125132 return num_outputs_sum
126133
127134 @property
128135 def models (self ):
136+ """
137+ The GPflow models representing our beliefs of the optimization problem.
138+
139+ :return: list of GPflow models
140+ """
129141 return self ._models
130142
131143 @property
132144 def data (self ):
133145 """
134- Property for accessing the training data of the models.
146+ The training data of the models.
135147
136148 Corresponds to the input data X which is the same for every model,
137149 and column-wise concatenation of the Y data over all models
138150
139- :return: X, Y tensors (if in tf_mode) or X, Y numpy arrays.
151+ :return: tuple X, Y of tensors (if in tf_mode) or numpy arrays.
140152 """
141153 if self ._tf_mode :
142154 return self .models [0 ].X , tf .concat (list (map (lambda model : model .Y , self .models )), 1 )
@@ -153,32 +165,39 @@ def constraint_indices(self):
153165 def objective_indices (self ):
154166 """
155167 Method returning the indices of the model outputs which are objective functions.
156- By default all outputs are objectives
168+
169+ By default all outputs are objectives.
170+
171+ :return: indices to the objectives, size R
157172 """
158173 return np .setdiff1d (np .arange (self .data [1 ].shape [1 ]), self .constraint_indices ())
159174
160175 def feasible_data_index (self ):
161176 """
162177 Returns a boolean array indicating which data points are considered feasible (according to the acquisition
163178 function(s) ) and which not.
164- By default all data is considered feasible
165- :return: boolean ndarray, N
179+
180+ By default all data is considered feasible.
181+
182+ :return: logical indices to the feasible data points, size N
166183 """
167184 return np .ones (self .data [0 ].shape [0 ], dtype = bool )
168185
169186 def setup (self ):
170187 """
171- Method triggered after calling set_data().
172-
173- Override for pre-calculation of quantities used later in
174- the evaluation of the acquisition function for candidate points
188+ Pre-calculation of quantities used later in the evaluation of the acquisition function for candidate points.
189+
190+ Automatically triggered by :meth:`~.Acquisition.set_data`.
175191 """
176192 pass
177193
178194 @AutoFlow ((float_type , [None , None ]))
179195 def evaluate_with_gradients (self , Xcand ):
180196 """
181197 AutoFlow method to compute the acquisition scores for candidates, also returns the gradients.
198+
199+ :return: acquisition scores, size N x 1,
200+ and the gradients of the acquisition scores, size N x D
182201 """
183202 acq = self .build_acquisition (Xcand )
184203 return acq , tf .gradients (acq , [Xcand ], name = "acquisition_gradient" )[0 ]
@@ -187,6 +206,8 @@ def evaluate_with_gradients(self, Xcand):
187206 def evaluate (self , Xcand ):
188207 """
189208 AutoFlow method to compute the acquisition scores for candidates, without returning the gradients.
209+
210+ :return: acquisition scores, size N x 1
190211 """
191212 return self .build_acquisition (Xcand )
192213
@@ -198,7 +219,6 @@ def __add__(self, other):
198219 >>> a2 = GPflowOpt.acquisition.ProbabilityOfFeasibility(m2)
199220 >>> type(a1 + a2)
200221 <type 'GPflowOpt.acquisition.AcquisitionSum'>
201-
202222 """
203223 if isinstance (other , AcquisitionSum ):
204224 return AcquisitionSum ([self ] + other .operands .sorted_params )
@@ -212,7 +232,6 @@ def __mul__(self, other):
212232 >>> a2 = GPflowOpt.acquisition.ProbabilityOfFeasibility(m2)
213233 >>> type(a1 * a2)
214234 <type 'GPflowOpt.acquisition.AcquisitionProduct'>
215-
216235 """
217236 if isinstance (other , AcquisitionProduct ):
218237 return AcquisitionProduct ([self ] + other .operands .sorted_params )
@@ -221,12 +240,11 @@ def __mul__(self, other):
221240
222241class AcquisitionAggregation (Acquisition ):
223242 """
224- Special acquisition implementation for aggregating multiple others , using a TensorFlow reduce operation.
243+ Aggregates multiple acquisition functions , using a TensorFlow reduce operation.
225244 """
226245
227246 def __init__ (self , operands , oper ):
228247 """
229- Constructor
230248 :param operands: list of acquisition objects
231249 :param oper: a tf.reduce operation (e.g., tf.reduce_sum) for aggregating the returned scores of each operand.
232250 """
@@ -310,11 +328,12 @@ def __mul__(self, other):
310328
311329class MCMCAcquistion (AcquisitionSum ):
312330 """
313- Acquisition object to apply MCMC over the hyperparameters of the models. The models of the acquisition object passed
314- into an object of this class is optimized with MLE, and then sampled with HMC. These hyperparameter samples are then
315- set in copies of the acquisition.
331+ Apply MCMC over the hyperparameters of an acquisition function (= over the hyperparameters of the contained models).
332+
333+ The models passed into an object of this class are optimized with MLE, and then further sampled with HMC.
334+ These hyperparameter samples are then set in copies of the acquisition.
316335
317- To compute the acquisition, the predictions of the acquisition copies are averaged.
336+ For evaluating the underlying acquisition function , the predictions of the acquisition copies are averaged.
318337 """
319338 def __init__ (self , acquisition , n_slices , ** kwargs ):
320339 assert isinstance (acquisition , Acquisition )
0 commit comments