Coverage for ase / optimize / bfgs.py: 78.16%
87 statements
« prev ^ index » next coverage.py v7.13.3, created at 2026-02-04 10:20 +0000
1# fmt: off
3import warnings
4from pathlib import Path
5from typing import IO
7import numpy as np
8from numpy.linalg import eigh
10from ase import Atoms
11from ase.optimize.optimize import Optimizer, UnitCellFilter
class BFGS(Optimizer):
    """Quasi-Newton optimizer using the BFGS approximation to the Hessian."""

    # default parameters; extends the base Optimizer defaults with the
    # initial-Hessian curvature guess 'alpha'
    defaults = {**Optimizer.defaults, 'alpha': 70.0}

    def __init__(
        self,
        atoms: Atoms,
        restart: str | Path | None = None,
        logfile: IO | str | Path | None = '-',
        trajectory: str | Path | None = None,
        append_trajectory: bool = False,
        maxstep: float | None = None,
        alpha: float | None = None,
        **kwargs,
    ):
        """BFGS optimizer.

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        restart: str | Path | None
            JSON file used to store hessian matrix. If set, file with
            such a name will be searched and hessian matrix stored will
            be used, if the file exists.

        trajectory: str or Path
            Trajectory file used to store optimisation path.

        logfile: file object, Path, or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        maxstep: float
            Used to set the maximum distance an atom can move per
            iteration (default value is 0.2 Å).

        alpha: float
            Initial guess for the Hessian (curvature of energy surface). A
            conservative value of 70.0 is the default, but number of needed
            steps to converge might be less if a lower value is used. However,
            a lower value also means risk of instability.

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.

        """
        # Resolve maxstep before super().__init__ so a restart file
        # (read via self.read()) can still override it later.
        if maxstep is None:
            self.maxstep = self.defaults['maxstep']
        else:
            self.maxstep = maxstep

        # A step cap above 1 Å almost certainly breaks the quadratic
        # approximation; warn but do not refuse.
        if self.maxstep > 1.0:
            warnings.warn('You are using a *very* large value for '
                          'the maximum step size: %.1f Å' % self.maxstep)

        self.alpha = alpha
        if self.alpha is None:
            self.alpha = self.defaults['alpha']
        super().__init__(
            atoms=atoms, restart=restart,
            logfile=logfile, trajectory=trajectory,
            append_trajectory=append_trajectory,
            **kwargs)

    def initialize(self):
        """Set up the initial Hessian guess and clear step history."""
        # initial hessian: a scaled identity, one row/column per degree
        # of freedom of the optimizable object
        self.H0 = np.eye(self.optimizable.ndofs()) * self.alpha

        # Current Hessian and the previous positions/forces used for the
        # BFGS update; None until the first step (or a restart read).
        self.H = None
        self.pos0 = None
        self.forces0 = None

    def read(self):
        """Restore optimizer state from the restart file written by dump()."""
        file = self.load()
        if len(file) == 5:
            # 5-tuple: written while optimizing through a UnitCellFilter,
            # which additionally stores the original cell.
            (self.H, self.pos0, self.forces0, self.maxstep,
             self.atoms.orig_cell) = file
        else:
            self.H, self.pos0, self.forces0, self.maxstep = file

    def step(self, gradient=None):
        """Take one BFGS step and dump the state for possible restarts."""
        gradient = self._get_gradient(gradient)
        optimizable = self.optimizable

        pos = optimizable.get_x()
        dpos, steplengths = self.prepare_step(pos, gradient)
        dpos = self.determine_step(dpos, steplengths)
        optimizable.set_x(pos + dpos)
        # Persist state so the run can be resumed; the UnitCellFilter case
        # must also carry the original cell (see read()).
        if isinstance(self.atoms, UnitCellFilter):
            self.dump((self.H, self.pos0, self.forces0, self.maxstep,
                       self.atoms.orig_cell))
        else:
            self.dump((self.H, self.pos0, self.forces0, self.maxstep))

    def prepare_step(self, pos, gradient):
        """Compute the raw quasi-Newton displacement and its step lengths.

        Updates the Hessian from the previous step, then solves for the
        Newton step in the Hessian eigenbasis.  Returns the (unscaled)
        displacement and the per-degree-of-freedom step lengths used by
        determine_step() to enforce maxstep.
        """
        pos = pos.ravel()
        gradient = gradient.ravel()
        # BFGS update uses forces (= -gradient) from this and the last step.
        self.update(pos, -gradient, self.pos0, self.forces0)
        omega, V = eigh(self.H)

        # FUTURE: Log this properly
        # # check for negative eigenvalues of the hessian
        # if any(omega < 0):
        #     n_negative = len(omega[omega < 0])
        #     msg = '\n** BFGS Hessian has {} negative eigenvalues.'.format(
        #         n_negative
        #     )
        #     print(msg, flush=True)
        #     if self.logfile is not None:
        #         self.logfile.write(msg)
        #         self.logfile.flush()

        # Newton step -H^-1 g evaluated in the eigenbasis; dividing by the
        # eigenvalue magnitudes means a negative eigenvalue cannot reverse
        # the direction of the step along that mode.
        dpos = np.dot(V, -np.dot(gradient, V) / np.fabs(omega))
        # XXX Here we are calling gradient_norm() on some positions.
        # Should there be a general norm concept
        steplengths = self.optimizable.gradient_norm(dpos)
        # Remember this point for the next Hessian update.
        self.pos0 = pos
        self.forces0 = -gradient.copy()
        return dpos, steplengths

    def determine_step(self, dpos, steplengths):
        """Determine step to take according to maxstep

        Normalize all steps as the largest step. This way
        we still move along the direction.
        """
        maxsteplength = np.max(steplengths)
        if maxsteplength >= self.maxstep:
            # One global scale factor preserves the step direction.
            scale = self.maxstep / maxsteplength
            # FUTURE: Log this properly
            # msg = '\n** scale step by {:.3f} to be shorter than {}'.format(
            #     scale, self.maxstep
            # )
            # print(msg, flush=True)

            dpos *= scale
        return dpos

    def update(self, pos, forces, pos0, forces0):
        """Apply the BFGS rank-two update to self.H.

        On the very first call (self.H is None) the Hessian is seeded with
        the scaled-identity guess H0 instead.
        """
        if self.H is None:
            self.H = self.H0
            return
        dpos = pos - pos0

        if np.abs(dpos).max() < 1e-7:
            # Same configuration again (maybe a restart):
            return

        dforces = forces - forces0
        # Standard BFGS update written in terms of force (= -gradient)
        # differences: H -= (df df^T)/(dr·df) + (H dr)(H dr)^T/(dr·H dr).
        a = np.dot(dpos, dforces)
        dg = np.dot(self.H, dpos)
        b = np.dot(dpos, dg)
        self.H -= np.outer(dforces, dforces) / a + np.outer(dg, dg) / b

    def replay_trajectory(self, traj):
        """Initialize hessian from old trajectory."""
        if isinstance(traj, str):
            from ase.io.trajectory import Trajectory
            traj = Trajectory(traj, 'r')
        # Start from scratch: the first update() call re-seeds H with H0.
        self.H = None
        atoms = traj[0]
        pos0 = atoms.get_positions().ravel()
        forces0 = atoms.get_forces().ravel()
        # Re-apply the BFGS update for every consecutive pair of images.
        for atoms in traj:
            pos = atoms.get_positions().ravel()
            forces = atoms.get_forces().ravel()
            self.update(pos, forces, pos0, forces0)
            pos0 = pos
            forces0 = forces

        # Leave the last image as the reference point for the next step.
        self.pos0 = pos0
        self.forces0 = forces0
class oldBFGS(BFGS):
    def determine_step(self, dpos, steplengths):
        """Legacy BFGS step-length scaling.

        Unlike the current behaviour (one global scale factor), every step
        longer than ``maxstep`` is truncated individually. Some might depend
        on this as some absurd kind of stimulated annealing to find the
        global minimum.
        """
        # Per-row divisor: 1.0 where the step already fits within maxstep,
        # steplength / maxstep where it has to be shortened.
        shrink = np.clip(steplengths / self.maxstep, 1.0, None)
        dpos /= shrink.reshape(-1, 1)
        return dpos