Coverage for ase/optimize/sciopt.py: 68.81% (109 statements)

# fmt: off
from typing import IO, Optional, Union

import numpy as np
import scipy.optimize as opt

from ase import Atoms
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers.

    Subclasses only need to implement the call to the SciPy driver in
    :meth:`call_fmin`.
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        alpha: float = 70.0,
        **kwargs,
    ):
35 """Initialize object
37 Parameters
38 ----------
39 atoms: :class:`~ase.Atoms`
40 The Atoms object to relax.
42 trajectory: str
43 Trajectory file used to store optimisation path.
45 logfile: file object or str
46 If *logfile* is a string, a file with that name will be opened.
47 Use '-' for stdout.
49 callback_always: bool
50 Should the callback be run after each force call (also in the
51 linesearch)
53 alpha: float
54 Initial guess for the Hessian (curvature of energy surface). A
55 conservative value of 70.0 is the default, but number of needed
56 steps to converge might be less if a lower value is used. However,
57 a lower value also means risk of instability.
59 kwargs : dict, optional
60 Extra arguments passed to
61 :class:`~ase.optimize.optimize.Optimizer`.
63 """
        restart = None
        super().__init__(atoms, restart, logfile, trajectory, **kwargs)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha
        self.max_steps = 0

    def x0(self):
        """Return x0 in a way SciPy can use.

        This method is mostly useful for subclasses that want to redefine
        the parameters (and the objective function)."""
        return self.optimizable.get_x()

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_value() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_gradient() / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy.

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback` can raise a :exc:`Converged` exception to signal
        that the optimisation is complete. This is caught silently by
        :meth:`run`.
        """
        if self.nsteps < self.max_steps:
            self.nsteps += 1
            gradient = self.optimizable.get_gradient()
            self.log(gradient)
            self.call_observers()
            if self.converged(gradient):
                raise Converged

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax

        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            if self.nsteps == 0:
                gradient = self.optimizable.get_gradient()
                self.log(gradient)
                self.call_observers()

            self.max_steps = steps + self.nsteps

            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass
        gradient = self.optimizable.get_gradient()
        return self.converged(gradient)

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
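

# A minimal sketch (not part of the original module) of how a concrete
# subclass is wired: ``call_fmin`` simply hands ``f``, ``fprime``, ``x0()``
# and ``callback`` to a SciPy driver.  Here ``scipy.optimize.minimize`` with
# ``method='BFGS'`` is assumed as the backend; the subclasses shipped below
# use the legacy ``fmin_*`` drivers instead.
class _ExampleSciPyMinimizeBFGS(SciPyOptimizer):
    """Illustrative subclass mirroring the pattern of the classes below."""

    def call_fmin(self, fmax, steps):
        # ``fmax`` arrives already divided by ``self.H0`` in ``run``, so it
        # is directly comparable to the gradient of the scaled problem.
        opt.minimize(self.f,
                     self.x0(),
                     jac=self.fprime,
                     method='BFGS',
                     callback=self.callback,
                     options={'gtol': fmax * 0.1,  # should never be reached
                              'maxiter': steps})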


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
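

# Hedged usage sketch (not part of the original module): relaxing a rattled
# bulk Cu cell with SciPyFminCG.  ``ase.build.bulk`` and the EMT calculator
# are assumed to be available; any other calculator would work the same way.
def _example_fmin_cg_usage():
    from ase.build import bulk
    from ase.calculators.emt import EMT

    atoms = bulk('Cu', cubic=True)
    atoms.rattle(stdev=0.1, seed=42)
    atoms.calc = EMT()

    dyn = SciPyFminCG(atoms, logfile='-')
    return dyn.run(fmax=0.05)  # True if the forces dropped below fmax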


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
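

# Hedged sketch (not part of the original module): the ``alpha`` keyword of
# ``SciPyOptimizer.__init__`` sets the initial Hessian guess ``H0``.  A value
# below the default of 70.0 may converge in fewer steps for soft systems but
# increases the risk of instability.
def _example_fmin_bfgs_alpha(atoms):
    dyn = SciPyFminBFGS(atoms, alpha=30.0, logfile='-')
    return dyn.run(fmax=0.05)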


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-free SciPy optimizers.

    Subclasses only need to implement the call to the SciPy driver in
    :meth:`call_fmin`.

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function.

    XXX: This is still a work in progress
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        **kwargs,
    ):
210 """Initialize object
212 Parameters
213 ----------
214 atoms: :class:`~ase.Atoms`
215 The Atoms object to relax.
217 trajectory: str
218 Trajectory file used to store optimisation path.
220 logfile: file object or str
221 If *logfile* is a string, a file with that name will be opened.
222 Use '-' for stdout.
224 callback_always: bool
225 Should the callback be run after each force call (also in the
226 linesearch)
228 alpha: float
229 Initial guess for the Hessian (curvature of energy surface). A
230 conservative value of 70.0 is the default, but number of needed
231 steps to converge might be less if a lower value is used. However,
232 a lower value also means risk of instability.
234 kwargs : dict, optional
235 Extra arguments passed to
236 :class:`~ase.optimize.optimize.Optimizer`.
238 """
        restart = None
        super().__init__(atoms, restart, logfile, trajectory, **kwargs)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use.

        This method is mostly useful for subclasses that want to redefine
        the parameters (and the objective function)."""
        return self.optimizable.get_x().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.function_calls += 1
        # No Hessian scaling here: the gradient-free drivers do not use H0.
        return self.optimizable.get_value()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy.

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.optimizable.get_gradient().reshape(-1, 3)
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)
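

# Hedged usage sketch (not part of the original module): the gradient-free
# Nelder-Mead driver is controlled through ``xtol``/``ftol`` rather than a
# force criterion, since no gradients are evaluated during the search.
def _example_fmin_usage(atoms):
    dyn = SciPyFmin(atoms, logfile='-')
    return dyn.run(ftol=0.01, xtol=0.01, steps=1000)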


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            Initial step size along each coordinate direction.
            Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        super().__init__(*args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
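

# Hedged usage sketch (not part of the original module): ``direc`` scales the
# identity matrix used as the initial direction set; the default of 0.04 is
# used when the keyword is omitted.
def _example_fmin_powell_usage(atoms):
    dyn = SciPyFminPowell(atoms, direc=0.01, logfile='-')
    return dyn.run(ftol=0.01, xtol=0.01, steps=1000)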