Coverage for ase / optimize / rfo.py: 84.21%
38 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-03-30 08:22 +0000
1import numpy as np
2import scipy
4from ase.optimize.bfgs import BFGS
class RFO(BFGS):
    """RFO (Rational Function Optimizer) combined with BFGS-based Hessian
    updates.

    RFO takes quasi-Newton-like steps in the quadratic regime and rational
    function damped steps outside of it.  The ``damping`` factor determines
    the transition threshold between the two regimes.

    Read about this algorithm here:

    | A. Banerjee, N. Adams, J. Simons, R. Shepard,
    | :doi:`Search for Stationary Points on Surfaces <10.1021/j100247a015>`
    | J. Phys. Chem. 1985, 89, 52-57.

    Note that ``damping`` is the reciprocal of coordinate scale :math:`a` from
    the reference.
    """

    # default parameters
    defaults = {**BFGS.defaults, 'damping': 1.0, 'eigsh_maxiter': 50}

    def __init__(
        self,
        *args,
        damping: float | None = None,
        eigsh_maxiter: int | None = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        *args
            Positional arguments passed to :class:`~ase.optimize.BFGS`.

        damping: float
            Determines the transition threshold between quasi-Newton-like and
            rational function damped steps.  The larger the value, the larger
            and stronger the damped regime.  (default is 1.0 Å^-1).

        eigsh_maxiter: int
            Maximum number of ``eigsh`` iterations done before falling back
            to ``eigh`` for solving the RFO step.

        **kwargs
            Keyword arguments passed to :class:`~ase.optimize.BFGS`.
        """
        # Fall back to the class-level defaults when a value is not supplied.
        self.damping = self.defaults['damping'] if damping is None else damping
        self.eigsh_maxiter = (
            self.defaults['eigsh_maxiter']
            if eigsh_maxiter is None
            else eigsh_maxiter
        )
        super().__init__(*args, **kwargs)

    def _reset_augmented(self, size):
        # Allocate the (size + 1) x (size + 1) gradient-augmented Hessian and
        # drop the warm-start eigenvector from any previous run.
        self.aug_H = np.zeros((size + 1, size + 1))
        self.v0 = None

    def initialize(self):
        # Initialize the BFGS Hessian, then size the augmented matrix to it.
        super().initialize()
        self._reset_augmented(len(self.H0))

    def read(self):
        # Restore the BFGS Hessian from the restart file, then size the
        # augmented matrix to it.
        super().read()
        self._reset_augmented(len(self.H))

    def prepare_step(self, pos, gradient):
        """Compute step from first eigenvector of gradient-augmented Hessian"""
        # Update Hessian BFGS-style.
        self.update(pos, -gradient, self.pos0, self.forces0)
        # Fill the augmented matrix: scaled Hessian block plus the scaled
        # gradient in the last row/column (the corner entry stays zero).
        scaled_gradient = gradient / self.damping
        self.aug_H[:-1, :-1] = self.H / self.damping**2
        self.aug_H[-1, :-1] = scaled_gradient
        self.aug_H[:-1, -1] = scaled_gradient

        # Try Lanczos (warm-started with the previous eigenvector) and fall
        # back to the dense solver in case of convergence issues.
        try:
            eigvecs = scipy.sparse.linalg.eigsh(
                self.aug_H,
                k=1,
                which='SA',
                v0=self.v0,
                maxiter=self.eigsh_maxiter,
            )[1]
        except scipy.sparse.linalg.ArpackNoConvergence:
            eigvecs = scipy.linalg.eigh(self.aug_H, subset_by_index=(0, 0))[1]

        # Keep the lowest eigenvector for warm-starting the next step; the
        # step itself is its coordinate part rescaled by the last component.
        self.v0 = eigvecs[:, 0]
        dpos = self.v0[:-1] / self.v0[-1] / self.damping
        steplengths = self.optimizable.gradient_norm(dpos)
        self.pos0 = pos
        self.forces0 = -gradient.copy()
        return dpos, steplengths