Skip to content

Commit 13de6f4

Browse files
authored
Package rename (#75)
* Renaming GPflowOpt package to gpflowopt
* Revert some changes in comments
1 parent cc36cfe commit 13de6f4

38 files changed

Lines changed: 387 additions & 378 deletions

doc/source/conf.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
# documentation root, use os.path.abspath to make it absolute, like shown here.
1919
#
2020
import os
21-
from GPflowOpt import __version__
21+
from gpflowopt import __version__
2222

2323
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
2424
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
@@ -151,7 +151,7 @@
151151
# (source start file, target name, title,
152152
# author, documentclass [howto, manual, or own class]).
153153
latex_documents = [
154-
(master_doc, 'GPflowOpt.tex', 'GPflowOpt Documentation',
154+
(master_doc, 'gpflowopt.tex', 'GPflowOpt Documentation',
155155
'Joachim van der Herten', 'manual'),
156156
]
157157

@@ -161,7 +161,7 @@
161161
# One entry per manual page. List of tuples
162162
# (source start file, name, description, authors, manual section).
163163
man_pages = [
164-
(master_doc, 'gpflowopt', 'GPflowOpt Documentation',
164+
(master_doc, 'GPflowOpt', 'GPflowOpt Documentation',
165165
[author], 1)
166166
]
167167

doc/source/notebooks/constrained_bo.ipynb

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,8 @@
2828
"%matplotlib inline\n",
2929
"import matplotlib.pyplot as plt\n",
3030
"\n",
31-
"import GPflow\n",
32-
"import GPflowOpt\n",
31+
"import gpflow\n",
32+
"import gpflowopt\n",
3333
"import numpy as np"
3434
]
3535
},
@@ -67,12 +67,12 @@
6767
" return -(-np.cos(1.5*X[:,0]+np.pi)*np.cos(1.5*X[:,1])+np.sin(1.5*X[:,0]+np.pi)*np.sin(1.5*X[:,1]))[:,None]\n",
6868
"\n",
6969
"# Setup input domain\n",
70-
"domain = GPflowOpt.domain.ContinuousParameter('x1', -2.25, 2.5) + \\\n",
71-
" GPflowOpt.domain.ContinuousParameter('x2', -2.5, 1.75)\n",
70+
"domain = gpflowopt.domain.ContinuousParameter('x1', -2.25, 2.5) + \\\n",
71+
" gpflowopt.domain.ContinuousParameter('x2', -2.5, 1.75)\n",
7272
"\n",
7373
"# Plot\n",
7474
"def plotfx(): \n",
75-
" X = GPflowOpt.design.FactorialDesign(101, domain).generate()\n",
75+
" X = gpflowopt.design.FactorialDesign(101, domain).generate()\n",
7676
" Zo = townsend(X)\n",
7777
" Zc = constraint(X)\n",
7878
" mask = Zc>=0\n",
@@ -111,22 +111,22 @@
111111
"outputs": [],
112112
"source": [
113113
"# Initial evaluations\n",
114-
"design = GPflowOpt.design.LatinHyperCube(11, domain)\n",
114+
"design = gpflowopt.design.LatinHyperCube(11, domain)\n",
115115
"X = design.generate()\n",
116116
"Yo = townsend(X)\n",
117117
"Yc = constraint(X)\n",
118118
"\n",
119119
"# Models\n",
120-
"objective_model = GPflow.gpr.GPR(X, Yo, GPflow.kernels.Matern52(2, ARD=True))\n",
120+
"objective_model = gpflow.gpr.GPR(X, Yo, gpflow.kernels.Matern52(2, ARD=True))\n",
121121
"objective_model.likelihood.variance = 0.01\n",
122-
"constraint_model = GPflow.gpr.GPR(np.copy(X), Yc, GPflow.kernels.Matern52(2, ARD=True))\n",
123-
"constraint_model.kern.lengthscales.transform = GPflow.transforms.Log1pe(1e-3)\n",
122+
"constraint_model = gpflow.gpr.GPR(np.copy(X), Yc, gpflow.kernels.Matern52(2, ARD=True))\n",
123+
"constraint_model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)\n",
124124
"constraint_model.likelihood.variance = 0.01\n",
125-
"constraint_model.likelihood.variance.prior = GPflow.priors.Gamma(1./4.,1.0)\n",
125+
"constraint_model.likelihood.variance.prior = gpflow.priors.Gamma(1./4.,1.0)\n",
126126
"\n",
127127
"# Setup\n",
128-
"ei = GPflowOpt.acquisition.ExpectedImprovement(objective_model)\n",
129-
"pof = GPflowOpt.acquisition.ProbabilityOfFeasibility(constraint_model)\n",
128+
"ei = gpflowopt.acquisition.ExpectedImprovement(objective_model)\n",
129+
"pof = gpflowopt.acquisition.ProbabilityOfFeasibility(constraint_model)\n",
130130
"joint = ei * pof"
131131
]
132132
},
@@ -172,7 +172,7 @@
172172
],
173173
"source": [
174174
"def plot():\n",
175-
" Xeval = GPflowOpt.design.FactorialDesign(101, domain).generate()\n",
175+
" Xeval = gpflowopt.design.FactorialDesign(101, domain).generate()\n",
176176
" Yevala,_ = joint.operands[0].models[0].predict_f(Xeval)\n",
177177
" Yevalb,_ = joint.operands[1].models[0].predict_f(Xeval)\n",
178178
" Yevalc = np.maximum(ei.evaluate(Xeval), 0)\n",
@@ -237,11 +237,11 @@
237237
"source": [
238238
"# First setup the optimization strategy for the acquisition function\n",
239239
"# Combining MC step followed by L-BFGS-B\n",
240-
"acquisition_opt = GPflowOpt.optim.StagedOptimizer([GPflowOpt.optim.MCOptimizer(domain, 200), \n",
241-
" GPflowOpt.optim.SciPyOptimizer(domain)])\n",
240+
"acquisition_opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, 200), \n",
241+
" gpflowopt.optim.SciPyOptimizer(domain)])\n",
242242
"\n",
243243
"# Then run the BayesianOptimizer for 50 iterations\n",
244-
"optimizer = GPflowOpt.BayesianOptimizer(domain, joint, optimizer=acquisition_opt)\n",
244+
"optimizer = gpflowopt.BayesianOptimizer(domain, joint, optimizer=acquisition_opt)\n",
245245
"with optimizer.silent():\n",
246246
" result = optimizer.optimize([townsend, constraint], n_iter=50)\n",
247247
" \n",

doc/source/notebooks/firststeps.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@
5353
],
5454
"source": [
5555
"import numpy as np\n",
56-
"from GPflowOpt.domain import ContinuousParameter\n",
56+
"from gpflowopt.domain import ContinuousParameter\n",
5757
"\n",
5858
"\n",
5959
"def fx(X):\n",
@@ -93,18 +93,18 @@
9393
}
9494
],
9595
"source": [
96-
"import GPflow\n",
97-
"from GPflowOpt.bo import BayesianOptimizer\n",
98-
"from GPflowOpt.design import LatinHyperCube\n",
99-
"from GPflowOpt.acquisition import ExpectedImprovement\n",
100-
"from GPflowOpt.optim import SciPyOptimizer\n",
96+
"import gpflow\n",
97+
"from gpflowopt.bo import BayesianOptimizer\n",
98+
"from gpflowopt.design import LatinHyperCube\n",
99+
"from gpflowopt.acquisition import ExpectedImprovement\n",
100+
"from gpflowopt.optim import SciPyOptimizer\n",
101101
"\n",
102102
"# Use standard Gaussian process Regression\n",
103103
"lhd = LatinHyperCube(21, domain)\n",
104104
"X = lhd.generate()\n",
105105
"Y = fx(X)\n",
106-
"model = GPflow.gpr.GPR(X, Y, GPflow.kernels.Matern52(2, ARD=True))\n",
107-
"model.kern.lengthscales.transform = GPflow.transforms.Log1pe(1e-3)\n",
106+
"model = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(2, ARD=True))\n",
107+
"model.kern.lengthscales.transform = gpflow.transforms.Log1pe(1e-3)\n",
108108
"\n",
109109
"# Now create the Bayesian Optimizer\n",
110110
"alpha = ExpectedImprovement(model)\n",
@@ -140,7 +140,7 @@
140140
"name": "python",
141141
"nbconvert_exporter": "python",
142142
"pygments_lexer": "ipython3",
143-
"version": "3.6.1"
143+
"version": "3.5.2"
144144
}
145145
},
146146
"nbformat": 4,

doc/source/notebooks/hyperopt.ipynb

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -101,9 +101,9 @@
101101
}
102102
],
103103
"source": [
104-
"from GPflow.kernels import RBF, Cosine, Linear, Bias, Matern52\n",
105-
"from GPflow import transforms\n",
106-
"from GPflow.gpr import GPR\n",
104+
"from gpflow.kernels import RBF, Cosine, Linear, Bias, Matern52\n",
105+
"from gpflow import transforms\n",
106+
"from gpflow.gpr import GPR\n",
107107
"\n",
108108
"Q = 10 # nr of terms in the sum\n",
109109
"max_iters = 1000\n",
@@ -183,8 +183,8 @@
183183
}
184184
],
185185
"source": [
186-
"from GPflowOpt.domain import ContinuousParameter\n",
187-
"from GPflowOpt.objective import batch_apply\n",
186+
"from gpflowopt.domain import ContinuousParameter\n",
187+
"from gpflowopt.objective import batch_apply\n",
188188
"\n",
189189
"# Objective function for our optimization\n",
190190
"# Input: N x 2Q ndarray, output: N x 1.\n",
@@ -239,9 +239,9 @@
239239
}
240240
],
241241
"source": [
242-
"from GPflowOpt.design import LatinHyperCube\n",
243-
"from GPflowOpt.acquisition import ExpectedImprovement\n",
244-
"from GPflowOpt import optim, BayesianOptimizer\n",
242+
"from gpflowopt.design import LatinHyperCube\n",
243+
"from gpflowopt.acquisition import ExpectedImprovement\n",
244+
"from gpflowopt import optim, BayesianOptimizer\n",
245245
"design = LatinHyperCube(6, domain)\n",
246246
"X = design.generate()\n",
247247
"\n",

doc/source/notebooks/multiobjective.ipynb

Lines changed: 87 additions & 78 deletions
Large diffs are not rendered by default.

doc/source/notebooks/new_acquisition.ipynb

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,8 @@
3030
"\n",
3131
"import matplotlib.pyplot as plt \n",
3232
"import copy\n",
33-
"import GPflow\n",
34-
"import GPflowOpt\n",
33+
"import gpflow\n",
34+
"import gpflowopt\n",
3535
"import tensorflow as tf"
3636
]
3737
},
@@ -49,8 +49,8 @@
4949
" return f[:,None] + rng.rand(X.shape[0], 1) * 0.25\n",
5050
"\n",
5151
"# Setup input domain\n",
52-
"domain = GPflowOpt.domain.ContinuousParameter('x1', -3, 3) + \\\n",
53-
" GPflowOpt.domain.ContinuousParameter('x2', -2, 2)"
52+
"domain = gpflowopt.domain.ContinuousParameter('x1', -3, 3) + \\\n",
53+
" gpflowopt.domain.ContinuousParameter('x2', -2, 2)"
5454
]
5555
},
5656
{
@@ -77,7 +77,7 @@
7777
},
7878
"outputs": [],
7979
"source": [
80-
"class AugmentedEI(GPflowOpt.acquisition.ExpectedImprovement):\n",
80+
"class AugmentedEI(gpflowopt.acquisition.ExpectedImprovement):\n",
8181
" def __init__(self, model):\n",
8282
" super(AugmentedEI, self).__init__(model)\n",
8383
"\n",
@@ -115,32 +115,32 @@
115115
}
116116
],
117117
"source": [
118-
"design = GPflowOpt.design.LatinHyperCube(9, domain)\n",
118+
"design = gpflowopt.design.LatinHyperCube(9, domain)\n",
119119
"X = design.generate()\n",
120120
"Y = camelback(X)\n",
121-
"m = GPflow.gpr.GPR(X, Y, GPflow.kernels.Matern52(2, ARD=True, lengthscales=[10,10], variance=10000))\n",
121+
"m = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(2, ARD=True, lengthscales=[10,10], variance=10000))\n",
122122
"m.likelihood.variance = 1\n",
123123
"m.likelihood.variance.fixed = True\n",
124-
"ei = GPflowOpt.acquisition.ExpectedImprovement(m)\n",
125-
"m = GPflow.gpr.GPR(X, Y, GPflow.kernels.Matern52(2, ARD=True, lengthscales=[10,10], variance=10000))\n",
124+
"ei = gpflowopt.acquisition.ExpectedImprovement(m)\n",
125+
"m = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(2, ARD=True, lengthscales=[10,10], variance=10000))\n",
126126
"m.likelihood.variance = 1\n",
127127
"m.likelihood.variance.fixed = False\n",
128128
"aei = AugmentedEI(m)\n",
129129
"\n",
130-
"opt = GPflowOpt.optim.StagedOptimizer([GPflowOpt.optim.MCOptimizer(domain, 200), \n",
131-
" GPflowOpt.optim.SciPyOptimizer(domain)])\n",
130+
"opt = gpflowopt.optim.StagedOptimizer([gpflowopt.optim.MCOptimizer(domain, 200), \n",
131+
" gpflowopt.optim.SciPyOptimizer(domain)])\n",
132132
"\n",
133-
"bopt1 = GPflowOpt.BayesianOptimizer(domain, ei, optimizer=opt)\n",
133+
"bopt1 = gpflowopt.BayesianOptimizer(domain, ei, optimizer=opt)\n",
134134
"with bopt1.silent():\n",
135135
" bopt1.optimize(camelback, n_iter=50)\n",
136136
"\n",
137-
"bopt2 = GPflowOpt.BayesianOptimizer(domain, aei, optimizer=opt)\n",
137+
"bopt2 = gpflowopt.BayesianOptimizer(domain, aei, optimizer=opt)\n",
138138
"with bopt2.silent():\n",
139139
" bopt2.optimize(camelback, n_iter=50)\n",
140140
"\n",
141141
"f, axes = plt.subplots(1,2, figsize=(14,7))\n",
142142
"\n",
143-
"Xeval = GPflowOpt.design.FactorialDesign(101, domain).generate()\n",
143+
"Xeval = gpflowopt.design.FactorialDesign(101, domain).generate()\n",
144144
"Yeval = camelback(Xeval)\n",
145145
"titles = ['EI', 'AEI']\n",
146146
"shape = (101, 101)\n",
@@ -172,7 +172,7 @@
172172
"name": "python",
173173
"nbconvert_exporter": "python",
174174
"pygments_lexer": "ipython3",
175-
"version": "3.6.1"
175+
"version": "3.5.2"
176176
}
177177
},
178178
"nbformat": 4,

doc/source/notebooks/structure.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@
7777
}
7878
],
7979
"source": [
80-
"from GPflowOpt.domain import ContinuousParameter\n",
80+
"from gpflowopt.domain import ContinuousParameter\n",
8181
"domain = ContinuousParameter('x1', -2, 2) + ContinuousParameter('x2', -1, 2)\n",
8282
"domain"
8383
]
@@ -116,7 +116,7 @@
116116
}
117117
],
118118
"source": [
119-
"from GPflowOpt.optim import SciPyOptimizer\n",
119+
"from gpflowopt.optim import SciPyOptimizer\n",
120120
"\n",
121121
"optimizer = SciPyOptimizer(domain, method='SLSQP')\n",
122122
"optimizer.set_initial([-1,-1])\n",
@@ -151,7 +151,7 @@
151151
}
152152
],
153153
"source": [
154-
"from GPflowOpt.optim import MCOptimizer\n",
154+
"from gpflowopt.optim import MCOptimizer\n",
155155
"optimizer = MCOptimizer(domain, 200)\n",
156156
"optimizer.optimize(fx)"
157157
]
@@ -207,10 +207,10 @@
207207
}
208208
],
209209
"source": [
210-
"from GPflowOpt.bo import BayesianOptimizer\n",
211-
"from GPflowOpt.design import FactorialDesign\n",
212-
"from GPflowOpt.acquisition import ExpectedImprovement\n",
213-
"import GPflow\n",
210+
"from gpflowopt.bo import BayesianOptimizer\n",
211+
"from gpflowopt.design import FactorialDesign\n",
212+
"from gpflowopt.acquisition import ExpectedImprovement\n",
213+
"import gpflow\n",
214214
"\n",
215215
"# The Bayesian Optimizer does not expect gradients to be returned\n",
216216
"def fx(X):\n",
@@ -224,7 +224,7 @@
224224
"\n",
225225
"# initializing a standard BO model, Gaussian Process Regression with\n",
226226
"# Matern52 ARD Kernel\n",
227-
"model = GPflow.gpr.GPR(X,Y,GPflow.kernels.Matern52(2, ARD=True))\n",
227+
"model = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(2, ARD=True))\n",
228228
"alpha = ExpectedImprovement(model)\n",
229229
"\n",
230230
"# Now we must specify an optimization algorithm to optimize the acquisition \n",
@@ -300,7 +300,7 @@
300300
"name": "python",
301301
"nbconvert_exporter": "python",
302302
"pygments_lexer": "ipython3",
303-
"version": "3.6.1"
303+
"version": "3.5.2"
304304
}
305305
},
306306
"nbformat": 4,

0 commit comments

Comments (0)