diff --git a/CHANGELOG.md b/CHANGELOG.md index 0614b78f4..042350506 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,11 @@ All notable changes to this project will be documented in this file. ### Added - [#1716](https://github.com/pints-team/pints/pull/1716) PINTS is now tested on Python 3.14. - [#1715](https://github.com/pints-team/pints/pull/1715) Added methods `ProblemErrorMeasure.problem()`, `ProblemLogLikelihood.problem()`, `SingleOutputProblem.model()` and `MultiOutputProblem.model()`. +- [#1508](https://github.com/pints-team/pints/pull/1508) Added a method `OptimisationController.set_parameter_tolerance` that allows methods to stop after a fixed number of iterations with no significant movement in parameter space. ### Changed - [#1713](https://github.com/pints-team/pints/pull/1713) PINTS now requires matplotlib 2.2 or newer. ### Deprecated +- [#1508](https://github.com/pints-team/pints/pull/1508) The methods `OptimisationController.max_unchanged_iterations` and `set_max_unchanged_iterations` are deprecated, in favour of `function_tolerance` and `set_function_tolerance` respectively. ### Removed ### Fixed - [#1713](https://github.com/pints-team/pints/pull/1713) Fixed Numpy 2.4.1 compatibility issues. 
diff --git a/pints/_optimisers/__init__.py b/pints/_optimisers/__init__.py index 55bd9a45a..22341ce5f 100644 --- a/pints/_optimisers/__init__.py +++ b/pints/_optimisers/__init__.py @@ -448,29 +448,93 @@ def __init__( # :meth:`run` can only be called once self._has_run = False + # Post-run statistics + self._evaluations = None + self._iterations = None + self._time = None + # # Stopping criteria + # Note that we always minimise: likelihoods are wrapped in an Error + # class that multiplies by -1 # # Maximum iterations self._max_iterations = None - self.set_max_iterations() - # Maximum unchanged iterations - self._unchanged_max_iterations = None # n_iter w/o change until stop - self._unchanged_threshold = 1 # smallest significant f change - self.set_max_unchanged_iterations() + # Maximum number of iterations without significant change in f(x) + self._ftol_max = None # max number of iterations without change + self._ftol_threshold = None # smallest significant change + + # Maximum number of iterations without significant change in x + self._xtol_max = None # max number of iterations without change + self._xtol_threshold = None # smallest significant change per param # Maximum evaluations self._max_evaluations = None - # Threshold value - self._threshold = None + # Function threshold: stop if f(x) < threshold + self._function_threshold = None - # Post-run statistics - self._evaluations = None - self._iterations = None - self._time = None + # Default stopping critera + self.set_max_iterations() + self.set_function_tolerance() + + def _check_stopping_criteria(self, iterations, unchanged_f_iterations, + unchanged_x_iterations, evaluations, f_new): + """ + Checks the stopping criteria, returns either ``None`` or a string + explaining why to stop. + + Note: The 'error in optimiser' criterion is not checked here. + + Parameters + ---------- + iterations + The current number of iterations. 
+ unchanged_f_iterations + The current number of iterations without a change in f (best or + guessed). + unchanged_x_iterations + The current number of iterations without a change in x (best or + guessed). + evaluations + The current number of function evaluations. + f_new + The current function value (best or guessed). + + """ + # Maximum number of iterations + if (self._max_iterations is not None and + iterations >= self._max_iterations): + return f'Maximum number of iterations reached ({iterations}).' + + # Maximum number of evaluations + if (self._max_evaluations is not None and + evaluations >= self._max_evaluations): + return ('Maximum number of evaluations reached' + f' ({self._max_evaluations}).') + + # Maximum number of iterations without significant change in f + if (self._ftol_max is not None and + unchanged_f_iterations >= self._ftol_max): + return ('No significant change in best function evaluation for' + f' {unchanged_f_iterations} iterations.') + + # Maximum number of iterations without significant change in x + if (self._xtol_max is not None and + unchanged_x_iterations >= self._xtol_max): + return ('No significant change in best parameters for' + f' {unchanged_x_iterations} iterations.') + + # Threshold function value + if (self._function_threshold is not None and + f_new < self._function_threshold): + return ('Objective function crossed threshold (' + f'{self._function_threshold}).') + + # All ok + return None def evaluations(self): """ @@ -479,13 +543,6 @@ def evaluations(self): """ return self._evaluations - def max_evaluations(self): - """ - Returns the maximum number of evaluations if this stopping criteria is - set, or ``None`` if it is not. See :meth:`set_max_evaluations`. 
- """ - return self._max_evaluations - def f_guessed_tracking(self): """ Returns ``True`` if the controller is set to track the optimiser @@ -496,6 +553,29 @@ def f_guessed_tracking(self): """ return self._use_f_guessed + def function_tolerance(self): + """ + Returns a tuple ``(iterations, threshold)`` specifying the maximum + iterations without a significant change in best function evaluation, + if this stopping criterion is set, else ``(None, None)``. + + The entries in the tuple correspond directly to the arguments to + :meth:`set_function_tolerance()`. + """ + if self._ftol_max is None: + return (None, None) + return (self._ftol_max, self._ftol_threshold) + + def _has_stopping_criterion(self): + """ Returns whether a stopping criterion has been set. """ + return any(( + self._max_iterations is not None, + self._max_evaluations is not None, + self._ftol_max is not None, + self._xtol_max is not None, + self._function_threshold is not None, + )) + def iterations(self): """ Returns the number of iterations performed during the last run, or @@ -503,25 +583,35 @@ def iterations(self): """ return self._iterations + def max_evaluations(self): + """ + Returns the maximum number of evaluations if this stopping criterion is + set, or ``None`` if it is not. + + See :meth:`set_max_evaluations`. + """ + return self._max_evaluations + def max_iterations(self): """ Returns the maximum iterations if this stopping criterion is set, or - ``None`` if it is not. See :meth:`set_max_iterations()`. + ``None`` if it is not. + + See :meth:`set_max_iterations()`. """ return self._max_iterations def max_unchanged_iterations(self): """ - Returns a tuple ``(iterations, threshold)`` specifying a maximum - unchanged iterations stopping criterion, or ``(None, None)`` if no such - criterion is set. - - The entries in the tuple correspond directly to the arguments to - :meth:`set_max_unchanged_iterations()`. + Deprecated alias of :meth:`function_tolerance()`. 
""" - if self._unchanged_max_iterations is None: - return (None, None) - return (self._unchanged_max_iterations, self._unchanged_threshold) + # Deprecated on 2026-02-05 + import warnings + warnings.warn( + 'The method `max_unchanged_iterations` is deprecated.' + ' Please use `function_tolerance` instead.') + + return self.function_tolerance() def optimiser(self): """ @@ -537,9 +627,27 @@ def parallel(self): """ return self._n_workers if self._parallel else False + def parameter_tolerance(self): + """ + Returns a tuple ``(iterations, threshold)`` specifying the maximum + iterations without a significant change in best parameters, if this + stopping criterion is set, else ``(None, None)``. + + The entries in the tuple correspond directly to the arguments to + :meth:`set_parameter_tolerance()`. + """ + if self._xtol_max is None: + return (None, None) + return (self._xtol_max, self._xtol_threshold) + def run(self): """ - Runs the optimisation, returns a tuple ``(x_best, f_best)``. + Runs the optimisation, returns a tuple ``(x, f)``. + + The returned ``x`` and ``f`` correspond to either the best ``f`` seen + during the optimisation, or to the best guessed ``f``, depending on the + setting for :meth:`set_f_guessed_tracking()`. See + :meth:Optimiser.f_guessed()` for details. An optional ``callback`` function can be passed in that will be called at the end of every iteration. 
The callback should take the arguments @@ -551,22 +659,17 @@ def run(self): raise RuntimeError("Controller is valid for single use only") self._has_run = True - # Check stopping criteria - has_stopping_criterion = False - has_stopping_criterion |= (self._max_iterations is not None) - has_stopping_criterion |= (self._unchanged_max_iterations is not None) - has_stopping_criterion |= (self._max_evaluations is not None) - has_stopping_criterion |= (self._threshold is not None) - if not has_stopping_criterion: + # Check if any stopping criteria have been set + if not self._has_stopping_criterion(): raise ValueError('At least one stopping criterion must be set.') # Iterations and function evaluations iteration = 0 evaluations = 0 - # Unchanged iterations count (used for stopping or just for - # information) - unchanged_iterations = 0 + # Number of iterations without a change in f(x) or x + unchanged_f_iterations = 0 + unchanged_x_iterations = 0 # Choose method to evaluate f = self._function @@ -592,8 +695,9 @@ def run(self): # Internally we always minimise! Keep a 2nd value to show the user. 
fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg) - # Keep track of the last significant change + # Keep track of the last significant change in f and x f_sig = np.inf + x_sig = np.ones(self._function.n_parameters()) * np.inf # Set up progress reporting next_message = 0 @@ -667,14 +771,28 @@ def run(self): fb = self._optimiser.f_best() fg = self._optimiser.f_guessed() fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg) - - # Check for significant changes f_new = fg if self._use_f_guessed else fb - if np.abs(f_new - f_sig) >= self._unchanged_threshold: - unchanged_iterations = 0 - f_sig = f_new - else: - unchanged_iterations += 1 + + # Check for significant changes in f or in x + if self._ftol_max: + if np.abs(f_new - f_sig) >= self._ftol_threshold: + unchanged_f_iterations = 0 + # Note: f_sig is only updated after a change, so that a + # slow drift that becomes significant over multiple + # iterations is still detected. + f_sig = f_new + else: + unchanged_f_iterations += 1 + + if self._xtol_max: + x_new = (self._optimiser.x_guessed() if self._use_f_guessed + else self._optimiser.x_best()) + if np.any(np.abs(x_new - x_sig) >= self._xtol_threshold): + unchanged_x_iterations = 0 + # Note: Only update here (see above) + x_sig = x_new + else: + unchanged_x_iterations += 1 # Update evaluation count evaluations += len(fs) @@ -696,40 +814,11 @@ def run(self): # Update iteration count iteration += 1 - # - # Check stopping criteria - # - - # Maximum number of iterations - if (self._max_iterations is not None and - iteration >= self._max_iterations): - running = False - halt_message = ('Maximum number of iterations (' - + str(iteration) + ') reached.') - - # Maximum number of iterations without significant change - halt = (self._unchanged_max_iterations is not None and - unchanged_iterations >= self._unchanged_max_iterations) - if running and halt: - running = False - halt_message = ('No significant change for ' + - str(unchanged_iterations) + ' 
iterations.') - - # Maximum number of evaluations - if (self._max_evaluations is not None and - evaluations >= self._max_evaluations): - running = False - halt_message = ( - 'Maximum number of evaluations (' - + str(self._max_evaluations) + ') reached.') - - # Threshold value - halt = (self._threshold is not None - and f_new < self._threshold) - if running and halt: - running = False - halt_message = ('Objective function crossed threshold: ' - + str(self._threshold) + '.') + # Check stopping criteria, set message if stopping + halt_message = self._check_stopping_criteria( + iteration, unchanged_f_iterations, unchanged_x_iterations, + evaluations, f_new) + running = halt_message is None # Error in optimiser error = self._optimiser.stop() @@ -813,7 +902,8 @@ def set_f_guessed_tracking(self, use_f_guessed=False): :meth:`pints.Optimiser.f_guessed()` or :meth:`pints.Optimiser.f_best()` (default). - The tracked ``f`` value is used to evaluate stopping criteria. + The tracked ``f`` (and/or ``x``) value is used to evaluate stopping + criteria, and is the one returned from :meth:`run`. """ self._use_f_guessed = bool(use_f_guessed) @@ -823,9 +913,9 @@ def set_log_interval(self, iters=20, warm_up=3): Parameters ---------- - ``interval`` + interval A log message will be shown every ``iters`` iterations. - ``warm_up`` + warm_up A log message will be shown every iteration, for the first ``warm_up`` iterations. """ @@ -861,8 +951,8 @@ def set_log_to_screen(self, enabled): def set_max_evaluations(self, evaluations=None): """ - Adds a stopping criterion, allowing the routine to halt after the - given number of ``evaluations``. + Adds a stopping criterion so that the routine halts after the given + number of function ``evaluations``. This criterion is disabled by default. To enable, pass in any positive integer. To disable again, use ``set_max_evaluations(None)``. 
@@ -876,8 +966,8 @@ def set_max_iterations(self, iterations=10000): """ - Adds a stopping criterion, allowing the routine to halt after the - given number of ``iterations``. + Adds a stopping criterion so that the routine halts after the given + number of ``iterations``. This criterion is enabled by default. To disable it, use ``set_max_iterations(None)``. @@ -891,12 +981,28 @@ def set_max_unchanged_iterations(self, iterations=200, threshold=1e-11): """ - Adds a stopping criterion, allowing the routine to halt if the - objective function doesn't change by more than ``threshold`` for the - given number of ``iterations``. + Deprecated alias of :meth:`set_function_tolerance()`. + """ + # Deprecated on 2026-02-05 + import warnings + warnings.warn( + 'The method `set_max_unchanged_iterations` is deprecated.' + ' Please use `set_function_tolerance` instead.') + + self.set_function_tolerance(iterations, threshold) + + def set_function_tolerance(self, iterations=200, threshold=1e-11): + """ + Adds a stopping criterion so that the routine halts if the objective + function does not change by more than ``threshold`` for the given + number of ``iterations``. This criterion is enabled by default. To disable it, use - ``set_max_unchanged_iterations(None)``. + ``set_function_tolerance(None)``. + + Note that this can be used to implement an absolute "ftol" stopping + criterion, by calling + ``set_function_tolerance(1, ftol)``. 
""" if iterations is not None: iterations = int(iterations) @@ -904,12 +1010,59 @@ def set_max_unchanged_iterations(self, iterations=200, threshold=1e-11): raise ValueError( 'Maximum number of iterations cannot be negative.') - threshold = float(threshold) - if threshold < 0: - raise ValueError('Minimum significant change cannot be negative.') + threshold = float(threshold) + if threshold < 0: + raise ValueError( + 'Minimum significant function change cannot be negative.') + else: + threshold = None + + self._ftol_max = iterations + self._ftol_threshold = threshold + + def set_parameter_tolerance(self, iterations=200, threshold=1e-11): + """ + Adds a stopping criterion so that the routine halts if the position in + parameter space does not change by more ``threshold`` for the given + number of ``iterations``. + + Thresholds can be defined per parameter, or a single scalar value can + be passed in. The position is deemed to have changed if + ``np.any(np.abs(x_new - x_sig) >= threshold)``, where ``x_sig`` is + either the starting position, or the last position for which the + criterion was met. + + This criterion is disabled by default. Once enabled, it can be disabled + again by calling ``set_parameter_tolerance(None)``. + + Note that this can be used to implement an absolute "xtol" stopping + criteria, by calling + ``set_parameter_tolerance(1, xtol)``. 
+ """ + if iterations is not None: + iterations = int(iterations) + if iterations < 0: + raise ValueError( + 'Maximum number of iterations cannot be negative.') + + # Test threshold size, convert scalar if needed, check sign + n_parameters = self._function.n_parameters() + if np.isscalar(threshold): + threshold = np.ones(n_parameters) * float(threshold) + elif len(threshold) == n_parameters: + threshold = pints.vector(threshold) + else: + raise ValueError( + 'Minimum significant parameter change must be a scalar or' + f' have length {n_parameters}, got {len(threshold)}.') + if np.any(threshold < 0): + raise ValueError( + 'Minimum significant parameter change cannot be negative.') + else: + threshold = None - self._unchanged_max_iterations = iterations - self._unchanged_threshold = threshold + self._xtol_max = iterations + self._xtol_threshold = threshold def set_parallel(self, parallel=False): """ @@ -934,24 +1087,25 @@ def set_parallel(self, parallel=False): def set_threshold(self, threshold): """ - Adds a stopping criterion, allowing the routine to halt once the - objective function goes below a set ``threshold``. + Adds a stopping criterion causing the routine to stop once the + objective function is less than the given ``threshold`` + (when minimising, or more when maximising). This criterion is disabled by default, but can be enabled by calling this method with a valid ``threshold``. To disable it, use ``set_treshold(None)``. """ if threshold is None: - self._threshold = None + self._function_threshold = None else: - self._threshold = float(threshold) + self._function_threshold = float(threshold) def threshold(self): """ Returns the threshold stopping criterion, or ``None`` if no threshold stopping criterion is set. See :meth:`set_threshold()`. 
""" - return self._threshold + return self._function_threshold def time(self): """ @@ -1117,7 +1271,7 @@ def f(x, a, b, c): # Set stopping criteria opt.set_threshold(threshold) opt.set_max_iterations(max_iter) - opt.set_max_unchanged_iterations(max_unchanged) + opt.set_function_tolerance(max_unchanged) # Set parallelisation opt.set_parallel(parallel) @@ -1225,7 +1379,7 @@ def f(x): # Set stopping criteria opt.set_threshold(threshold) opt.set_max_iterations(max_iter) - opt.set_max_unchanged_iterations(max_unchanged) + opt.set_function_tolerance(max_unchanged) # Set parallelisation opt.set_parallel(parallel) diff --git a/pints/tests/test_opt_adam.py b/pints/tests/test_opt_adam.py index 39f541c4c..1a3c488ee 100755 --- a/pints/tests/test_opt_adam.py +++ b/pints/tests/test_opt_adam.py @@ -94,7 +94,6 @@ def test_logging(self): r, x, s = self.problem() opt = pints.OptimisationController(r, x, s, method=method) opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) opt.set_max_iterations(3) with StreamCapture() as c: opt.run() diff --git a/pints/tests/test_opt_controller.py b/pints/tests/test_opt_controller.py index ca5e10c2e..bd59ac0e4 100755 --- a/pints/tests/test_opt_controller.py +++ b/pints/tests/test_opt_controller.py @@ -20,6 +20,47 @@ method = pints.XNES +class Mock1DError(pints.ErrorMeasure): + """ Mock-up 1d error, returned values intended to be ignored. """ + def n_parameters(self): + return 1 + + def __call__(self, x): + return 0 + + +class List1DOptimiser(pints.Optimiser): + """ + Mock-up optimiser using a fixed lists of values and evaluations. 
+ """ + fs = np.linspace(1, 0, 100) + xs = np.zeros(100) + np = 1 + + def __init__(self, x0, sigma0=None, boundaries=None): + super().__init__(x0, sigma0, boundaries) + self._i = 0 + + def ask(self): + return np.array(self.xs[self._i: self._i + self.np]) + + def name(self): + return 'List1D' + + def tell(self, f): + self._i += self.np + + def x_best(self): + try: + return np.array(self.xs[self._i: self._i + self.np]) + except IndexError: + raise Exception('List1DOptimiser has exhausted list values at' + f' index {self._i}') + + def f_best(self): + return self.fs[self._i] + + class TestOptimisationController(unittest.TestCase): """ Tests shared optimisation properties. @@ -115,7 +156,7 @@ def cb(*arg): s = 0.01 opt = pints.OptimisationController(r, x0, s, method=method) opt.set_log_to_screen(False) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) opt.set_max_iterations(10) # Pass in an invalid value @@ -144,92 +185,13 @@ def cb(*arg): self.assertEqual(len(args), 0) opt = pints.OptimisationController(r, x0, s, method=method) opt.set_log_to_screen(False) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) opt.set_max_iterations(10) opt.set_callback(cb) opt.set_callback(None) opt.run() self.assertEqual(len(args), 0) - def test_optimise(self): - # Tests :meth: `pints.optimise()`. - - r = pints.toy.TwistedGaussianLogPDF(2, 0.01) - x = np.array([0, 1.01]) - s = 0.01 - b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) - with StreamCapture(): - x, f = pints.optimise(r, x, s, b, method=pints.XNES) - self.assertEqual(x.shape, (2, )) - self.assertTrue(f < 1e-6) - - def test_transform(self): - # Test optimisation with parameter transformation. 
- - # Test with LogPDF - r = pints.toy.TwistedGaussianLogPDF(2, 0.01) - x0 = np.array([0, 1.01]) - b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) - s = 0.01 - t = pints.RectangularBoundariesTransformation(b) - with warnings.catch_warnings(record=True): - opt = pints.OptimisationController(r, x0, s, b, t, method) - opt.set_log_to_screen(False) - opt.set_max_unchanged_iterations(None) - opt.set_max_iterations(10) - opt.run() - - # Test with ErrorMeasure - r = pints.toy.ParabolicError() - x0 = [0.1, 0.1] - b = pints.RectangularBoundaries([-1, -1], [1, 1]) - s = 0.1 - t = pints.RectangularBoundariesTransformation(b) - with warnings.catch_warnings(record=True): - opt = pints.OptimisationController(r, x0, s, b, t, method) - opt.set_log_to_screen(False) - opt.set_max_unchanged_iterations(None) - opt.set_max_iterations(10) - x, _ = opt.run() - - # Test output is detransformed - self.assertEqual(x.shape, (2, )) - self.assertTrue(b.check(x)) - - def test_stopping_max_evaluations(self): - # Runs an optimisation with the max_fevals stopping criterion. - - r = pints.toy.TwistedGaussianLogPDF(2, 0.01) - x = np.array([0, 1.01]) - b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) - s = 0.01 - opt = pints.OptimisationController(r, x, s, b, method=method) - opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) - opt.set_max_evaluations(10) - self.assertEqual(opt.max_evaluations(), 10) - self.assertRaises(ValueError, opt.set_max_evaluations, -1) - with StreamCapture() as c: - opt.run() - self.assertIn('Halting: Maximum number of evaluations', c.text()) - - def test_stopping_max_iterations(self): - # Runs an optimisation with the max_iter stopping criterion. 
- - r = pints.toy.TwistedGaussianLogPDF(2, 0.01) - x = np.array([0, 1.01]) - b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) - s = 0.01 - opt = pints.OptimisationController(r, x, s, b, method=method) - opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) - opt.set_max_iterations(10) - self.assertEqual(opt.max_iterations(), 10) - self.assertRaises(ValueError, opt.set_max_iterations, -1) - with StreamCapture() as c: - opt.run() - self.assertIn('Halting: Maximum number of iterations', c.text()) - def test_logging(self): # Test with logpdf @@ -240,7 +202,7 @@ def test_logging(self): s = 0.01 opt = pints.OptimisationController(r, x, s, b, method=method) opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) opt.set_log_interval(3) opt.set_max_iterations(10) with StreamCapture() as c: @@ -268,7 +230,7 @@ def test_logging(self): self.assertEqual(lines[11][:-3], '10 47 -4.140462 -4.140463 0:0') self.assertEqual( - lines[12], 'Halting: Maximum number of iterations (10) reached.') + lines[12], 'Halting: Maximum number of iterations reached (10).') # Invalid log interval self.assertRaises(ValueError, opt.set_log_interval, 0) @@ -278,7 +240,7 @@ def test_logging(self): x = np.array([1.01, 1.01]) opt = pints.OptimisationController(r, x, method=pints.SNES) opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) opt.set_log_interval(4) opt.set_max_iterations(11) opt.optimiser().set_population_size(4) @@ -307,61 +269,250 @@ def test_logging(self): self.assertEqual(lines[11][:-3], '11 44 0.0165 3.601763 0:0') self.assertEqual( - lines[12], 'Halting: Maximum number of iterations (11) reached.') + lines[12], 'Halting: Maximum number of iterations reached (11).') # Invalid log interval self.assertRaises(ValueError, opt.set_log_interval, 0) - def test_stopping_max_unchanged(self): - # Runs an optimisation with the max_unchanged stopping criterion. 
+ def test_optimise(self): + # Tests :meth: `pints.optimise()`. + r = pints.toy.TwistedGaussianLogPDF(2, 0.01) x = np.array([0, 1.01]) + s = 0.01 + b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) + with StreamCapture(): + x, f = pints.optimise(r, x, s, b, method=pints.XNES) + self.assertEqual(x.shape, (2, )) + self.assertTrue(f < 1e-6) + + def test_transform(self): + # Test optimisation with parameter transformation. + + # Test with LogPDF + r = pints.toy.TwistedGaussianLogPDF(2, 0.01) + x0 = np.array([0, 1.01]) b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) s = 0.01 - opt = pints.OptimisationController(r, x, s, b, method=method) + t = pints.RectangularBoundariesTransformation(b) + with warnings.catch_warnings(record=True): + opt = pints.OptimisationController(r, x0, s, b, t, method) + opt.set_log_to_screen(False) + opt.set_function_tolerance(None) + opt.set_max_iterations(10) + opt.run() + + # Test with ErrorMeasure + r = pints.toy.ParabolicError() + x0 = [0.1, 0.1] + b = pints.RectangularBoundaries([-1, -1], [1, 1]) + s = 0.1 + t = pints.RectangularBoundariesTransformation(b) + with warnings.catch_warnings(record=True): + opt = pints.OptimisationController(r, x0, s, b, t, method) + opt.set_log_to_screen(False) + opt.set_function_tolerance(None) + opt.set_max_iterations(10) + x, _ = opt.run() + + # Test output is detransformed + self.assertEqual(x.shape, (2, )) + self.assertTrue(b.check(x)) + + def test_stopping_max_evaluations(self): + # Runs an optimisation with the max evaluations stopping criterion. 
+ + e = Mock1DError() + opt = pints.OptimisationController(e, [0], method=List1DOptimiser) + opt.optimiser().np = 2 # Two evaluations per iteration + opt.set_log_to_screen(True) + opt.set_max_iterations(None) + opt.set_function_tolerance(None) + + # Test getting and setting + self.assertIs(opt.max_evaluations(), None) + opt.set_max_evaluations(5) + self.assertEqual(opt.max_evaluations(), 5) + opt.set_max_evaluations(None) + self.assertIs(opt.max_evaluations(), None) + opt.set_max_evaluations(23) + self.assertEqual(opt.max_evaluations(), 23) + self.assertRaises(ValueError, opt.set_max_evaluations, -1) + + # Run, test result + with StreamCapture() as c: + opt.run() + self.assertIn('Maximum number of evaluations reached (23)', c.text()) + self.assertEqual(opt.iterations(), 12) + + def test_stopping_max_iterations(self): + # Runs a mock optimisation with the max iterations stopping criterion. + + e = Mock1DError() + opt = pints.OptimisationController(e, [0], method=List1DOptimiser) + opt.optimiser().np = 2 # Two evaluations per iteration opt.set_log_to_screen(True) opt.set_max_iterations(None) - opt.set_max_unchanged_iterations(None) - self.assertEqual(opt.max_unchanged_iterations(), (None, None)) - opt.set_max_unchanged_iterations(2, 1e-6) - self.assertEqual(opt.max_unchanged_iterations(), (2, 1e-6)) - opt.set_max_unchanged_iterations(3) - self.assertEqual(opt.max_unchanged_iterations(), (3, 1e-11)) - self.assertRaises(ValueError, opt.set_max_unchanged_iterations, -1) - self.assertRaises(ValueError, opt.set_max_unchanged_iterations, 10, -1) + opt.set_function_tolerance(None) + + # Test getting and setting + self.assertIs(opt.max_iterations(), None) + opt.set_max_iterations(3) + self.assertEqual(opt.max_iterations(), 3) + opt.set_max_iterations(None) + self.assertIs(opt.max_iterations(), None) + opt.set_max_iterations(15) + self.assertEqual(opt.max_iterations(), 15) + self.assertRaises(ValueError, opt.set_max_iterations, -1) + + # Run, test result with StreamCapture() 
as c: opt.run() - self.assertIn('Halting: No significant change', c.text()) + self.assertIn('Maximum number of iterations reached (15)', c.text()) + self.assertEqual(opt.iterations(), 15) + + def test_stopping_function_tolerance(self): + # Runs a mock optimisation with the function tolerance criterion. + # Test case starts with drift (each step below threshold, but total + # change is above), then should halt at 5 + + e = Mock1DError() + opt = pints.OptimisationController(e, [0], method=List1DOptimiser) + m = opt.optimiser() + m.fs = [0, 0.5, 1, 1.5, 2.0, 2.5, 4, 5, 5.1, 5.2, 5.3, 5.4, 1, 3, 0] + m.xs = [0] * len(m.fs) + opt.set_log_to_screen(True) + opt.set_max_iterations(None) - def test_stopping_threshold(self): - # Runs an optimisation with the threshold stopping criterion. + # Set by default + self.assertEqual(opt.function_tolerance(), (200, 1e-11)) - r = pints.toy.TwistedGaussianLogPDF(2, 0.01) - x = np.array([0.008, 1.01]) - b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) - s = 0.01 - opt = pints.OptimisationController(r, x, s, b, method=method) + # Unset and reset without threshold + opt.set_function_tolerance(None) + self.assertEqual(opt.function_tolerance(), (None, None)) + opt.set_function_tolerance(3) + self.assertEqual(opt.function_tolerance(), (3, 1e-11)) + + # Unset and reset with threshold + opt.set_function_tolerance(None, None) + self.assertEqual(opt.function_tolerance(), (None, None)) + opt.set_function_tolerance(4, 1) + self.assertEqual(opt.function_tolerance(), (4, 1)) + + # Bad calls + self.assertRaises(ValueError, opt.set_function_tolerance, -1) + self.assertRaises(ValueError, opt.set_function_tolerance, 10, -1) + + # Test deprecated aliases + a = opt.function_tolerance() + with warnings.catch_warnings(record=True) as w: + b = opt.max_unchanged_iterations() + self.assertIn('deprecated', str(w[-1].message)) + self.assertEqual(a, b) + with warnings.catch_warnings(record=True) as w: + opt.set_max_unchanged_iterations(1, 0) + 
self.assertIn('deprecated', str(w[-1].message)) + self.assertEqual(opt.function_tolerance(), (1, 0)) + opt.set_function_tolerance(4, 1) + + # Test + with StreamCapture() as c: + opt.run() + self.assertIn('No significant change in best function', c.text()) + self.assertEqual(opt.iterations(), 11) + + def test_stopping_parameter_tolerance(self): + # Runs a mock optimisation with the parameter tolerance criterion. + # Test case starts with drift (each step below threshold, but total + # change is above), then should halt at 4 + + e = Mock1DError() + opt = pints.OptimisationController(e, [0], method=List1DOptimiser) + m = opt.optimiser() + m.xs = [0, 1, 1.5, 2.0, 2.5, 3, 4, 4.1, 4.2, 4.3, 5, 6, 7] + m.fs = [0] * len(m.xs) + opt.set_log_to_screen(True) + opt.set_max_iterations(None) + opt.set_function_tolerance(None) + self.assertEqual(opt.parameter_tolerance(), (None, None)) + + # Set without threshold + opt.set_parameter_tolerance(2) + n, t = opt.parameter_tolerance() + self.assertEqual(n, 2) + self.assertEqual(list(t), [1e-11]) + + # Unset and reset without threshold + opt.set_parameter_tolerance(None) + n, t = opt.parameter_tolerance() + self.assertIsNone(n) + self.assertIsNone(t) + opt.set_parameter_tolerance(2) + n, t = opt.parameter_tolerance() + self.assertEqual(n, 2) + self.assertEqual(list(t), [1e-11]) + + # Unset and reset with threshold + opt.set_parameter_tolerance(None, None) + n, t = opt.parameter_tolerance() + self.assertIsNone(n) + self.assertIsNone(t) + opt.set_parameter_tolerance(3, 2) + n, t = opt.parameter_tolerance() + self.assertEqual(n, 3) + self.assertEqual(list(t), [2]) + opt.set_parameter_tolerance(3, [1]) + n, t = opt.parameter_tolerance() + self.assertEqual(n, 3) + self.assertEqual(list(t), [1]) + + # Bad calls + self.assertRaises(ValueError, opt.set_parameter_tolerance, -1) + self.assertRaises(ValueError, opt.set_parameter_tolerance, 10, -1) + self.assertRaises(ValueError, opt.set_parameter_tolerance, 10, [1, 1]) + + # Test + with 
StreamCapture() as c: + opt.run() + self.assertIn('No significant change in best parameters', c.text()) + self.assertEqual(opt.iterations(), 9) + + def test_stopping_function_threshold(self): + # Runs a mock optimisation with the function threshold stopping crit. + + e = Mock1DError() + opt = pints.OptimisationController(e, [0], method=List1DOptimiser) + m = opt.optimiser() + m.fs = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + m.xs = [0] * len(m.fs) opt.set_log_to_screen(True) opt.set_max_iterations(None) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) + + # Test getting and setting + self.assertIsNone(opt.threshold()) + opt.set_threshold(3) + self.assertEqual(opt.threshold(), 3) + opt.set_threshold(None) + self.assertIsNone(opt.threshold()) opt.set_threshold(5) self.assertEqual(opt.threshold(), 5) + + # Run, test result with StreamCapture() as c: opt.run() - self.assertIn( - 'Halting: Objective function crossed threshold', c.text()) + self.assertIn( + 'Halting: Objective function crossed threshold (5.0)', c.text()) + self.assertEqual(opt.iterations(), 6) def test_stopping_no_criterion(self): # Tries to run an optimisation with the no stopping criterion. 
- r = pints.toy.TwistedGaussianLogPDF(2, 0.01) - x = np.array([0, 1.01]) - b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) - s = 0.01 - opt = pints.OptimisationController(r, x, s, b, method=method) + e = Mock1DError() + opt = pints.OptimisationController(e, [0], method=List1DOptimiser) opt.set_log_to_screen(debug) opt.set_max_iterations(None) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) self.assertRaises(ValueError, opt.run) def test_population_size_not_set(self): @@ -424,7 +575,7 @@ def test_post_run_statistics(self): s = 0.01 opt = pints.OptimisationController(r, x, s, b, method=method) opt.set_log_to_screen(False) - opt.set_max_unchanged_iterations(50, 1e-11) + opt.set_function_tolerance(50, 1e-11) # Before run methods return None self.assertIsNone(opt.iterations()) @@ -452,7 +603,7 @@ def test_exception_on_multi_use(self): s = 0.01 opt = pints.OptimisationController(r, x, s, b, method=method) opt.set_log_to_screen(False) - opt.set_max_unchanged_iterations(None) + opt.set_function_tolerance(None) opt.set_max_iterations(10) opt.run() self.assertRaisesRegex( diff --git a/pints/tests/test_opt_irpropmin.py b/pints/tests/test_opt_irpropmin.py index 4a4a30893..72d1cd2d9 100755 --- a/pints/tests/test_opt_irpropmin.py +++ b/pints/tests/test_opt_irpropmin.py @@ -190,7 +190,6 @@ def test_logging(self): r, x, s = self.problem() opt = pints.OptimisationController(r, x, s, method=method) opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) opt.set_max_iterations(2) with StreamCapture() as c: opt.run() @@ -207,7 +206,6 @@ def test_logging(self): r, x, s = self.problem() opt = pints.OptimisationController(r, x, s, method=method) opt.set_log_to_screen(True) - opt.set_max_unchanged_iterations(None) opt.set_max_iterations(4) opt.set_log_interval(1) opt.optimiser().set_min_step_size(0.03) diff --git a/pints/tests/test_opt_nelder_mead.py b/pints/tests/test_opt_nelder_mead.py index 89f85ad1c..3eb5ac17b 100755 --- 
a/pints/tests/test_opt_nelder_mead.py +++ b/pints/tests/test_opt_nelder_mead.py @@ -197,7 +197,8 @@ def test_rosenbrock(self): '400 416 0 0 0:00.0', '420 443 0 0 0:00.0', '428 452 0 0 0:00.0', - 'Halting: No significant change for 200 iterations.', + 'Halting: No significant change in best function evaluation' + ' for 200 iterations.', ) # Compare lenght of log diff --git a/pints/tests/test_opt_pso.py b/pints/tests/test_opt_pso.py index 29b70a446..a3c73002f 100755 --- a/pints/tests/test_opt_pso.py +++ b/pints/tests/test_opt_pso.py @@ -139,7 +139,7 @@ def test_logging(self): for line in lines[5:-1]: self.assertTrue(pattern.match(line)) self.assertEqual( - lines[-1], 'Halting: Maximum number of iterations (10) reached.') + lines[-1], 'Halting: Maximum number of iterations reached (10).') # Log to file opt = pints.OptimisationController(r, x, s, b, method=method)