Coverage for ase/optimize/bfgslinesearch.py: 83.97%

131 statements  

coverage.py v7.13.3, created at 2026-02-04 10:20 +0000

# fmt: off

# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************

import time
from typing import IO, Optional, Union

import numpy as np
from numpy import absolute, eye, isinf

from ase import Atoms
from ase.optimize.optimize import Optimizer
from ase.utils.linesearch import LineSearch

# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core

# Modified from scipy_optimize
abs = absolute
pymin = min
pymax = max
__version__ = '0.1'


class BFGSLineSearch(Optimizer):
    def __init__(
        self,
        atoms: Atoms,
        restart: Optional[str] = None,
        logfile: Union[IO, str] = '-',
        maxstep: Optional[float] = None,
        trajectory: Optional[str] = None,
        c1: float = 0.23,
        c2: float = 0.46,
        alpha: float = 10.0,
        stpmax: float = 50.0,
        **kwargs,
    ):
        """Optimize atomic positions with the BFGSLineSearch algorithm,
        which uses both forces and potential energy information.

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        restart: str
            JSON file used to store the Hessian matrix. If set and the
            file exists, the stored Hessian matrix is read and reused.

        trajectory: str
            Trajectory file used to store the optimisation path.

        maxstep: float
            Maximum distance an atom can move per iteration
            (default value is 0.2 Angstroms).

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.

        """
        if maxstep is None:
            self.maxstep = self.defaults['maxstep']
        else:
            self.maxstep = maxstep
        self.stpmax = stpmax
        self.alpha = alpha
        self.H = None
        self.c1 = c1
        self.c2 = c2
        self.force_calls = 0
        self.function_calls = 0
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.load_restart = False
        self.task = 'START'
        self.rep_count = 0
        self.p = None
        self.alpha_k = None
        self.no_update = False
        self.replay = False

        super().__init__(atoms, restart, logfile, trajectory, **kwargs)
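    # A minimal usage sketch (not part of the original module); the EMT
    # calculator and the 0.05 eV/Angstrom threshold are illustrative
    # choices, not defaults:
    #
    #     from ase.build import bulk
    #     from ase.calculators.emt import EMT
    #     from ase.optimize import BFGSLineSearch
    #
    #     atoms = bulk('Cu', cubic=True)
    #     atoms.rattle(0.05)
    #     atoms.calc = EMT()
    #     opt = BFGSLineSearch(atoms, logfile='-')
    #     opt.run(fmax=0.05)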

    def read(self):
        self.r0, self.g0, self.e0, self.task, self.H = self.load()
        self.load_restart = True

    def reset(self):
        self.H = None
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.rep_count = 0
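    # Note on restarts: step() dumps the tuple (r0, g0, e0, task, H) to the
    # restart file and read() above restores it, so a restarted run resumes
    # with the previously accumulated inverse Hessian instead of the identity.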

    def step(self, gradient=None):
        optimizable = self.optimizable
        gradient = self._get_gradient(gradient)
        r = optimizable.get_x()
        g = gradient / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        # o,v = np.linalg.eigh(self.B)
        e = self.func(r)

        self.p = -np.dot(self.H, g)
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(optimizable.ndofs() / 3 * 1e-10):
            self.p /= (p_size / np.sqrt(optimizable.ndofs() / 3 * 1e-10))
        ls = LineSearch(get_gradient_norm=self.optimizable.gradient_norm)
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                            maxstep=self.maxstep, c1=self.c1,
                            c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")

        dr = self.alpha_k * self.p
        optimizable.set_x(r + dr)
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))
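    # Summary of step(): the search direction is p = -H @ g (rescaled if its
    # norm is vanishingly small), a Wolfe line search picks the step length
    # alpha_k along p, and the new coordinates are x = r + alpha_k * p.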

    def update(self, r, g, r0, g0, p0):
        self.I = eye(self.optimizable.ndofs(), dtype=int)
        if self.H is None:
            self.H = eye(self.optimizable.ndofs())
            # self.B = np.linalg.inv(self.H)
            return
        else:
            dr = r - r0
            dg = g - g0
            # self.alpha_k can be None!!!
            if not (((self.alpha_k or 0) > 0 and
                     abs(np.dot(g, p0)) - abs(np.dot(g0, p0)) < 0) or
                    self.replay):
                return
            if self.no_update is True:
                print('skip update')
                return

            try:  # this was handled in numeric, let it remain for more safety
                rhok = 1.0 / (np.dot(dg, dr))
            except ZeroDivisionError:
                rhok = 1000.0
                print("Divide-by-zero encountered: rhok assumed large")
            if isinf(rhok):  # this is patch for np
                rhok = 1000.0
                print("Divide-by-zero encountered: rhok assumed large")
            A1 = self.I - dr[:, np.newaxis] * dg[np.newaxis, :] * rhok
            A2 = self.I - dg[:, np.newaxis] * dr[np.newaxis, :] * rhok
            self.H = (np.dot(A1, np.dot(self.H, A2)) +
                      rhok * dr[:, np.newaxis] * dr[np.newaxis, :])
            # self.B = np.linalg.inv(self.H)
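    # The block above is the textbook BFGS update of the inverse Hessian,
    #     H_new = (I - rho * s y^T) H (I - rho * y s^T) + rho * s s^T
    # with s = dr, y = dg and rho = 1 / (y^T s); rhok is replaced by a large
    # finite value whenever y^T s underflows to zero, so the update stays
    # well defined.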

    def func(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.function_calls += 1
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_value() / self.alpha

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.force_calls += 1
        # Scale the problem as SciPy uses I as initial Hessian.
        gradient = self.optimizable.get_gradient()
        return gradient / self.alpha
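    # Both func() and fprime() divide by self.alpha (10.0 by default), so the
    # optimizer works on a scaled objective for which the identity is a
    # reasonable initial inverse Hessian; in unscaled units this amounts to
    # starting from H0 = I / alpha.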

    def replay_trajectory(self, traj):
        """Initialize hessian from old trajectory."""
        self.replay = True
        from ase.utils import IOContext

        with IOContext() as files:
            if isinstance(traj, str):
                from ase.io.trajectory import Trajectory
                traj = files.closelater(Trajectory(traj, mode='r'))

            r0 = None
            g0 = None
            for i in range(len(traj) - 1):
                r = traj[i].get_positions().ravel()
                g = - traj[i].get_forces().ravel() / self.alpha
                self.update(r, g, r0, g0, self.p)
                self.p = -np.dot(self.H, g)
                r0 = r.copy()
                g0 = g.copy()
            self.r0 = r0
            self.g0 = g0
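    # A hedged usage sketch (file names are illustrative): warm-start the
    # inverse Hessian from a previous run before continuing the relaxation,
    #
    #     opt = BFGSLineSearch(atoms, trajectory='relax.traj')
    #     opt.replay_trajectory('previous_relax.traj')
    #     opt.run(fmax=0.05)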

    def log(self, gradient):
        if self.logfile is None:
            return
        fmax = self.optimizable.gradient_norm(gradient)
        e = self.optimizable.get_value()
        T = time.localtime()
        name = self.__class__.__name__
        w = self.logfile.write
        if self.nsteps == 0:
            w('%s %4s[%3s] %8s %15s %12s\n' %
              (' ' * len(name), 'Step', 'FC', 'Time', 'Energy', 'fmax'))
        w('%s: %3d[%3d] %02d:%02d:%02d %15.6f %12.4f\n'
          % (name, self.nsteps, self.force_calls, T[3], T[4], T[5], e,
             fmax))
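    # Each logged row shows the step index, the cumulative number of force
    # calls ('FC'), the time of day (HH:MM:SS), the potential energy, and the
    # gradient norm used for convergence ('fmax').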

def wrap_function(function, args):
    ncalls = [0]

    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper
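# wrap_function appears to be carried over from the scipy_optimize code and is
# not referenced elsewhere in this file; it pairs a callable with a call
# counter. A small illustrative example (hypothetical values):
#
#     ncalls, f = wrap_function(lambda x, a: a * x, (2.0,))
#     f(3.0)            # returns 6.0
#     print(ncalls[0])  # 1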