Coverage for /builds/ase/ase/ase/optimize/bfgslinesearch.py: 84.21%

133 statements  

coverage.py v7.5.3, created at 2025-08-02 00:12 +0000

# fmt: off

# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************

import time
from typing import IO, Optional, Union

import numpy as np
from numpy import absolute, eye, isinf

from ase import Atoms
from ase.optimize.optimize import Optimizer
from ase.utils.linesearch import LineSearch

# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core

# Modified from scipy_optimize
abs = absolute
pymin = min
pymax = max
__version__ = '0.1'


class BFGSLineSearch(Optimizer):
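    """BFGS relaxation combined with an inexact line search.

    See ``__init__`` for the full parameter documentation.
    """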

    def __init__(
        self,
        atoms: Atoms,
        restart: Optional[str] = None,
        logfile: Union[IO, str] = '-',
        maxstep: Optional[float] = None,
        trajectory: Optional[str] = None,
        c1: float = 0.23,
        c2: float = 0.46,
        alpha: float = 10.0,
        stpmax: float = 50.0,
        **kwargs,
    ):
44 """Optimize atomic positions in the BFGSLineSearch algorithm, which 

45 uses both forces and potential energy information. 

46 

47 Parameters 

48 ---------- 

49 atoms: :class:`~ase.Atoms` 

50 The Atoms object to relax. 

51 

52 restart: str 

53 JSON file used to store hessian matrix. If set, file with 

54 such a name will be searched and hessian matrix stored will 

55 be used, if the file exists. 

56 

57 trajectory: str 

58 Trajectory file used to store optimisation path. 

59 

60 maxstep: float 

61 Used to set the maximum distance an atom can move per 

62 iteration (default value is 0.2 Angstroms). 

63 

64 logfile: file object or str 

65 If *logfile* is a string, a file with that name will be opened. 

66 Use '-' for stdout. 

67 

68 kwargs : dict, optional 

69 Extra arguments passed to 

70 :class:`~ase.optimize.optimize.Optimizer`. 

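        Example
        -------
        A minimal usage sketch; the EMT calculator and the water molecule
        are illustrative choices only:

        >>> from ase.build import molecule
        >>> from ase.calculators.emt import EMT
        >>> from ase.optimize import BFGSLineSearch
        >>> atoms = molecule('H2O')
        >>> atoms.calc = EMT()
        >>> opt = BFGSLineSearch(atoms, logfile=None)
        >>> converged = opt.run(fmax=0.05)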
72 """ 

        if maxstep is None:
            self.maxstep = self.defaults['maxstep']
        else:
            self.maxstep = maxstep
        self.stpmax = stpmax
        self.alpha = alpha
        self.H = None
        self.c1 = c1
        self.c2 = c2
        self.force_calls = 0
        self.function_calls = 0
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.load_restart = False
        self.task = 'START'
        self.rep_count = 0
        self.p = None
        self.alpha_k = None
        self.no_update = False
        self.replay = False

        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)

    def read(self):
        self.r0, self.g0, self.e0, self.task, self.H = self.load()
        self.load_restart = True

    def reset(self):
        self.H = None
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.rep_count = 0

    def step(self, forces=None):
        optimizable = self.optimizable

        if forces is None:
            forces = optimizable.get_gradient().reshape(-1, 3)

        r = optimizable.get_x()
        g = -forces.reshape(-1) / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        # o,v = np.linalg.eigh(self.B)
        e = self.func(r)

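        # Quasi-Newton step direction from the current inverse Hessian.
        # If the direction is at numerical-noise level (roughly 1e-5 per
        # atom), rescale it up to that threshold so the line search below
        # still receives a usable direction.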
        self.p = -np.dot(self.H, g)
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(optimizable.ndofs() / 3 * 1e-10):
            self.p /= (p_size / np.sqrt(optimizable.ndofs() / 3 * 1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                            maxstep=self.maxstep, c1=self.c1,
                            c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")

        dr = self.alpha_k * self.p
        optimizable.set_x(r + dr)
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))

    def update(self, r, g, r0, g0, p0):
        self.I = eye(self.optimizable.ndofs(), dtype=int)
        if self.H is None:
            self.H = eye(self.optimizable.ndofs())
            # self.B = np.linalg.inv(self.H)
            return
        else:
            dr = r - r0
            dg = g - g0
            # self.alpha_k can be None: update H only after a successful
            # line search (alpha_k > 0) in which the magnitude of the
            # directional derivative along p0 decreased, or when replaying
            # a trajectory.
            if not (((self.alpha_k or 0) > 0 and
                     abs(np.dot(g, p0)) - abs(np.dot(g0, p0)) < 0) or
                    self.replay):
                return
            if self.no_update is True:
                print('skip update')
                return

            try:  # this was handled in numeric, let it remain for more safety
                rhok = 1.0 / (np.dot(dg, dr))
            except ZeroDivisionError:
                rhok = 1000.0
                print("Divide-by-zero encountered: rhok assumed large")
            if isinf(rhok):  # this is patch for np
                rhok = 1000.0
                print("Divide-by-zero encountered: rhok assumed large")

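            # Standard BFGS update of the inverse Hessian (cf. Nocedal &
            # Wright, "Numerical Optimization"):
            #     H <- (I - rho dr dg^T) H (I - rho dg dr^T) + rho dr dr^T
            # with rho = 1 / (dg . dr).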
            A1 = self.I - dr[:, np.newaxis] * dg[np.newaxis, :] * rhok
            A2 = self.I - dg[:, np.newaxis] * dr[np.newaxis, :] * rhok
            self.H = (np.dot(A1, np.dot(self.H, A2)) +
                      rhok * dr[:, np.newaxis] * dr[np.newaxis, :])
            # self.B = np.linalg.inv(self.H)

    def func(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.function_calls += 1
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_value() / self.alpha

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_x(x)
        self.force_calls += 1
        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        forces = self.optimizable.get_gradient()
        return -forces / self.alpha

    def replay_trajectory(self, traj):
        """Initialize the Hessian from an old trajectory."""

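        # Typical use (a sketch): call opt.replay_trajectory('history.traj')
        # before opt.run(), so H starts from previously accumulated
        # curvature information rather than from the identity.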
        self.replay = True
        from ase.utils import IOContext

        with IOContext() as files:
            if isinstance(traj, str):
                from ase.io.trajectory import Trajectory
                traj = files.closelater(Trajectory(traj, mode='r'))

            r0 = None
            g0 = None
            for i in range(len(traj) - 1):
                r = traj[i].get_positions().ravel()
                g = -traj[i].get_forces().ravel() / self.alpha
                self.update(r, g, r0, g0, self.p)
                self.p = -np.dot(self.H, g)
                r0 = r.copy()
                g0 = g.copy()
            self.r0 = r0
            self.g0 = g0

    def log(self, gradient):
        if self.logfile is None:
            return
        fmax = self.optimizable.gradient_norm(gradient)
        e = self.optimizable.get_value()
        T = time.localtime()
        name = self.__class__.__name__
        w = self.logfile.write
        if self.nsteps == 0:
            w('%s %4s[%3s] %8s %15s %12s\n' %
              (' ' * len(name), 'Step', 'FC', 'Time', 'Energy', 'fmax'))
        w('%s: %3d[%3d] %02d:%02d:%02d %15.6f %12.4f\n'
          % (name, self.nsteps, self.force_calls, T[3], T[4], T[5], e,
             fmax))
        self.logfile.flush()


def wrap_function(function, args):
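    """Wrap *function* to count calls; return (ncalls, function_wrapper)."""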

    ncalls = [0]

    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper