Coverage for /builds/ase/ase/ase/optimize/sciopt.py: 69.09% (110 statements)

# fmt: off

from typing import IO, Optional, Union

import numpy as np
import scipy.optimize as opt

from ase import Atoms
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
21 """General interface for SciPy optimizers
23 Only the call to the optimizer is still needed
24 """
26 def __init__(
27 self,
28 atoms: Atoms,
29 logfile: Union[IO, str] = '-',
30 trajectory: Optional[str] = None,
31 callback_always: bool = False,
32 alpha: float = 70.0,
33 **kwargs,
34 ):
35 """Initialize object
37 Parameters
38 ----------
39 atoms: :class:`~ase.Atoms`
40 The Atoms object to relax.
42 trajectory: str
43 Trajectory file used to store optimisation path.
45 logfile: file object or str
46 If *logfile* is a string, a file with that name will be opened.
47 Use '-' for stdout.
49 callback_always: bool
50 Should the callback be run after each force call (also in the
51 linesearch)
53 alpha: float
54 Initial guess for the Hessian (curvature of energy surface). A
55 conservative value of 70.0 is the default, but number of needed
56 steps to converge might be less if a lower value is used. However,
57 a lower value also means risk of instability.
59 kwargs : dict, optional
60 Extra arguments passed to
61 :class:`~ase.optimize.optimize.Optimizer`.
63 """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha
        self.max_steps = 0

    def x0(self):
        """Return x0 in a way SciPy can use

        This class is mostly usable for subclasses wanting to redefine the
        parameters (and the objective function)"""
        return self.optimizable.get_x()

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_value() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.optimizable.get_gradient() / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback`() can raise a :exc:`Converged` exception to signal
        that the optimisation is complete. This will be silently ignored by
        :meth:`run`().
        """
        if self.nsteps < self.max_steps:
            self.nsteps += 1
            gradient = self.optimizable.get_gradient()
            f = gradient.reshape(-1, 3)
            self.log(f)
            self.call_observers()
            if self.converged(gradient):
                raise Converged

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax

        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            if self.nsteps == 0:
                gradient = self.optimizable.get_gradient()
                self.log(gradient)
                self.call_observers()

            self.max_steps = steps + self.nsteps

            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass
        gradient = self.optimizable.get_gradient()
        return self.converged(gradient)

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
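

# Illustrative sketch only (an assumption, not part of ASE): a subclass of
# ``SciPyOptimizer`` needs nothing more than ``call_fmin``.  The hypothetical
# example below wires ``scipy.optimize.minimize`` with the L-BFGS-B method;
# the gtol/maxiter options mirror the wrappers defined below.
#
#     class SciPyMinimizeLBFGS(SciPyOptimizer):
#         def call_fmin(self, fmax, steps):
#             opt.minimize(self.f, self.x0(), jac=self.fprime,
#                          method='L-BFGS-B', callback=self.callback,
#                          options={'gtol': fmax * 0.1, 'maxiter': steps})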


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
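

# Example usage (an illustrative sketch, not part of this module): relax a
# rattled Cu cell with the EMT calculator shipped with ASE.  The trajectory
# filename is arbitrary.
#
#     from ase.build import bulk
#     from ase.calculators.emt import EMT
#     from ase.optimize.sciopt import SciPyFminBFGS
#
#     atoms = bulk('Cu', cubic=True) * (2, 2, 2)
#     atoms.rattle(0.05)
#     atoms.calc = EMT()
#     SciPyFminBFGS(atoms, trajectory='relax.traj').run(fmax=0.05)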


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-free SciPy optimizers

    Only the call to the SciPy optimizer itself must be provided by
    subclasses.

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function.

    XXX: This is still a work in progress
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        **kwargs,
    ):
        """Initialize object

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        trajectory: str
            Trajectory file used to store the optimisation path.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Should the callback be run after each function call (also in the
            linesearch)?

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.

        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This class is mostly usable for subclasses wanting to redefine the
        parameters (and the objective function)"""
        return self.optimizable.get_x().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.function_calls += 1
        return self.optimizable.get_value()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.optimizable.get_gradient().reshape(-1, 3)
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            How much to change x initially. Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
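

# Illustrative usage sketch (an assumption, not part of this module; these
# gradient-free wrappers are flagged above as a work in progress).  ``direc``
# scales the initial direction set and ``atoms`` is any Atoms object with a
# calculator attached:
#
#     relax = SciPyFminPowell(atoms, direc=0.1)
#     relax.run(xtol=0.01, ftol=0.001, steps=500)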