Coverage for ase / optimize / mdmin.py: 96.88%
32 statements
« prev ^ index » next — coverage.py v7.13.3, created at 2026-02-04 10:20 +0000
1from typing import IO, Optional, Union
3import numpy as np
5from ase import Atoms
6from ase.optimize.optimize import Optimizer
class MDMin(Optimizer):
    """Minimize energy with a velocity-projection ("quick-min") MD scheme.

    A velocity-Verlet-like integration of the equation of motion in
    which, after each half-kick, the velocity is either zeroed (when it
    opposes the force) or projected onto the current force direction.
    """

    # Defaults inherited from Optimizer, extended with the MD time step.
    defaults = {**Optimizer.defaults, 'dt': 0.2}

    def __init__(
        self,
        atoms: Atoms,
        restart: Optional[str] = None,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        dt: Optional[float] = None,
        maxstep: Optional[float] = None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        restart: str
            JSON file used to store hessian matrix. If set, file with
            such a name will be searched and hessian matrix stored will
            be used, if the file exists.

        trajectory: str
            Trajectory file used to store optimisation path.

        logfile: str
            Text file used to write summary information.

        dt: float
            Time step for integrating the equation of motion.

        maxstep: float
            Spatial step limit in Angstrom. This allows larger values of dt
            while being more robust to instabilities in the optimization.

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.
        """
        super().__init__(atoms, restart, logfile, trajectory, **kwargs)

        # Fall back to the class defaults when no (truthy) value is given.
        self.dt = dt if dt else self.defaults['dt']
        self.maxstep = maxstep if maxstep else self.defaults['maxstep']

    def initialize(self):
        # The velocity is created lazily on the first call to step().
        self.v = None

    def read(self):
        # Restore velocity and time step from the restart file.
        self.v, self.dt = self.load()

    def step(self, forces=None):
        # Forces are the negative gradient of the objective.
        forces = -self._get_gradient(forces)
        optimizable = self.optimizable

        if self.v is None:
            # First step: start from rest.
            self.v = np.zeros(optimizable.ndofs())
        else:
            # Half-kick, then correct the velocity: zero it when it
            # points against the force, otherwise project it onto the
            # force direction.
            self.v += 0.5 * self.dt * forces
            power = np.vdot(self.v, forces)
            if power < 0.0:
                self.v[:] = 0.0
            else:
                self.v[:] = forces * power / np.vdot(forces, forces)

        # Second half-kick and trial displacement.
        self.v += 0.5 * self.dt * forces
        x = optimizable.get_x()
        dx = self.dt * self.v

        # Rescale the displacement so its norm never exceeds
        # self.maxstep; the small epsilon guards against overflow /
        # division by zero.  Scaling factors above 1 are clipped so
        # short steps are left untouched.
        # NOTE(review): gradient_norm() is (ab)used here to compute the
        # norm of a displacement vector, not of a gradient.
        dx_norm = self.optimizable.gradient_norm(dx)
        scale = self.maxstep / (1e-6 + dx_norm)
        dx *= np.clip(scale, 0.0, 1.0)
        optimizable.set_x(x + dx)

        # Persist state needed by read() after a restart.
        self.dump((self.v, self.dt))