Coverage for /builds/ase/ase/ase/optimize/ode.py: 85.87%
92 statements
« prev ^ index » next coverage.py v7.5.3, created at 2025-08-02 00:12 +0000
1# fmt: off
3from typing import IO, Optional, Union
5import numpy as np
7from ase import Atoms
8from ase.optimize.sciopt import OptimizerConvergenceError, SciPyOptimizer
def ode12r(f, X0, h=None, verbose=1, fmax=1e-6, maxtol=1e3, steps=100,
           rtol=1e-1, C1=1e-2, C2=2.0, hmin=1e-10, extrapolate=3,
           callback=None, apply_precon=None, converged=None, residual=None):
    """
    Adaptive ODE solver, which uses 1st and 2nd order approximations to
    estimate local error and choose a new step length.

    This optimizer is described in detail in:

        S. Makri, C. Ortner and J. R. Kermode, J. Chem. Phys.
        150, 094109 (2019)
        https://dx.doi.org/10.1063/1.5064465

    Parameters
    ----------
    f : function
        function returning driving force on system
    X0 : 1-dimensional array
        initial value of degrees of freedom
    h : float
        step size, if None an estimate is used based on ODE12
    verbose: int
        verbosity level. 0=no output, 1=log output (default), 2=debug output.
    fmax : float
        convergence tolerance for residual force
    maxtol : float
        terminate if residual exceeds this value
    steps : int
        maximum number of update steps to attempt
    rtol : float
        relative tolerance
    C1 : float
        sufficient contraction parameter
    C2 : float
        residual growth control (Inf means there is no control)
    hmin : float
        minimal allowed step size
    extrapolate : int
        extrapolation style (3 seems the most robust)
    callback : function
        optional callback function to call after each update step
    apply_precon : function
        apply a preconditioner to the optimisation
    converged : function
        alternative function to check convergence, rather than
        using a simple norm of the forces.
    residual : function
        compute the residual from the current forces

    Returns
    -------
    X : array
        final value of degrees of freedom (the current iterate is also
        returned when `steps` is exhausted without convergence)

    Raises
    ------
    OptimizerConvergenceError
        if the residual exceeds `maxtol`, or the step size falls below
        `hmin`.
    ValueError
        if `extrapolate` is not 1, 2 or 3.
    """
    X = X0
    Fn = f(X)

    if callback is None:
        # Default: no per-step notification.
        def callback(X):
            pass

    if residual is None:
        # Default residual: infinity norm of the forces.
        def residual(F, X):
            return np.linalg.norm(F, np.inf)
    Rn = residual(Fn, X)

    if apply_precon is None:
        # Default: identity preconditioner.
        def apply_precon(F, X):
            return F, residual(F, X)
    Fp, Rp = apply_precon(Fn, X)

    def log(*args):
        if verbose >= 1:
            print(*args)

    def debug(*args):
        if verbose >= 2:
            print(*args)

    if converged is None:
        def converged(F, X):
            return residual(F, X) <= fmax

    if converged(Fn, X):
        log("ODE12r terminates successfully after 0 iterations")
        return X
    if Rn >= maxtol:
        raise OptimizerConvergenceError(f"ODE12r: Residual {Rn} is too large "
                                        "at iteration 0")

    # computation of the initial step
    r = residual(Fp, X)  # pick the biggest force
    if h is None:
        h = 0.5 * rtol ** 0.5 / r  # Choose a stepsize based on that force
        h = max(h, hmin)  # Make sure the step size is not too small

    for nit in range(1, steps + 1):
        Xnew = X + h * Fp  # Pick a new position
        Fn_new = f(Xnew)  # Calculate the new forces at this position
        Rn_new = residual(Fn_new, Xnew)
        Fp_new, Rp_new = apply_precon(Fn_new, Xnew)

        e = 0.5 * h * (Fp_new - Fp)  # Estimate the area under the forces curve
        err = np.linalg.norm(e, np.inf)  # Error estimate

        # Accept step if residual decreases sufficiently and/or error acceptable
        accept = ((Rp_new <= Rp * (1 - C1 * h)) or
                  ((Rp_new <= Rp * C2) and err <= rtol))

        # Pick an extrapolation scheme for the system & find new increment
        y = Fp - Fp_new
        if extrapolate == 1:  # F(xn + h Fp)
            h_ls = h * (Fp @ y) / (y @ y)
        elif extrapolate == 2:  # F(Xn + h Fp)
            h_ls = h * (Fp @ Fp_new) / (Fp @ y + 1e-10)
        elif extrapolate == 3:  # min | F(Xn + h Fp) |
            h_ls = h * (Fp @ y) / (y @ y + 1e-10)
        else:
            raise ValueError(f'invalid extrapolate value: {extrapolate}. '
                             'Must be 1, 2 or 3')
        if np.isnan(h_ls) or h_ls < hmin:  # Rejects if increment is too small
            h_ls = np.inf

        # Error-based step estimate.  Guard err == 0 (identical consecutive
        # preconditioned forces) to avoid a divide-by-zero warning; inf is
        # harmless since h_err only enters through min() below.
        h_err = h * 0.5 * np.sqrt(rtol / err) if err > 0 else np.inf

        # Accept the step and do the update
        if accept:
            X = Xnew
            Rn = Rn_new
            Fn = Fn_new
            Fp = Fp_new
            Rp = Rp_new
            callback(X)

            # We check the residuals again
            if Rn >= maxtol:
                raise OptimizerConvergenceError(
                    f"ODE12r: Residual {Rn} is too "
                    f"large at iteration number {nit}")

            if converged(Fn, X):
                log("ODE12r: terminates successfully "
                    f"after {nit} iterations.")
                return X

            # Compute a new step size.
            # Based on the extrapolation and some other heuristics:
            # grow by at most 4x, bounded by the error and line-search
            # estimates, but never shrink below a quarter of the old step.
            h = max(0.25 * h,
                    min(4 * h, h_err, h_ls))
            debug(f"ODE12r: accept: new h = {h}, |F| = {Rp}")
            debug(f"ODE12r: hls = {h_ls}")
            debug(f"ODE12r: herr = {h_err}")
        else:
            # Compute a new step size: shrink, but by no more than 10x.
            h = max(0.1 * h, min(0.25 * h, h_err,
                                 h_ls))
            debug(f"ODE12r: reject: new h = {h}")
            debug(f"ODE12r: |Fnew| = {Rp_new}")
            debug(f"ODE12r: |Fold| = {Rp}")
            debug(f"ODE12r: |Fnew|/|Fold| = {Rp_new / Rp}")

        # abort if step size is too small
        if abs(h) <= hmin:
            raise OptimizerConvergenceError('ODE12r terminates unsuccessfully'
                                            f' Step size {h} too small')

    # Step budget exhausted without meeting the convergence criterion:
    # return the current iterate (previously fell through, returning None,
    # which contradicted the documented return contract).
    return X
class ODE12r(SciPyOptimizer):
    """
    Optimizer based on adaptive ODE solver :func:`ode12r`
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        alpha: float = 1.0,
        precon: Optional[str] = None,
        verbose: int = 0,
        rtol: float = 1e-2,
        **kwargs,
    ):
        SciPyOptimizer.__init__(self, atoms, logfile, trajectory,
                                callback_always, alpha, **kwargs)
        self._actual_atoms = atoms
        # Import here rather than at module level to avoid a circular
        # dependency with ase.optimize.precon.
        from ase.optimize.precon.precon import make_precon
        self.precon = make_precon(precon)
        self.verbose = verbose
        self.rtol = rtol

    def apply_precon(self, Fn, X):
        """Sync atomic positions from the flat vector X, then return the
        preconditioned forces and their residual."""
        atoms = self._actual_atoms
        atoms.set_positions(X.reshape(len(atoms), 3))
        preconditioned, res = self.precon.apply(Fn, atoms)
        return preconditioned, res

    def call_fmin(self, fmax, steps):
        """Drive :func:`ode12r` with the negated gradient as the force."""
        def force(x):
            return -self.fprime(x)

        ode12r(force,
               self.x0(),
               fmax=fmax,
               steps=steps,
               verbose=self.verbose,
               apply_precon=self.apply_precon,
               callback=self.callback,
               converged=lambda grad, X: self.converged(grad),
               rtol=self.rtol)