# mypy: allow-untyped-defs
from typing import Optional

import torch

from .optimizer import Optimizer, ParamsT


__all__ = ["LBFGS"]


def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua
    # Compute bounds of interpolation area
    if bounds is not None:
        xmin_bound, xmax_bound = bounds
    else:
        xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)

    # Code for most common case: cubic interpolation of 2 points
    #   w/ function and derivative values for both
    # Solution in this case (where x2 is the farthest point):
    #   d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
    #   d2 = sqrt(d1^2 - g1*g2);
    #   min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
    #   t_new = min(max(min_pos,xmin_bound),xmax_bound);
    d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
    d2_square = d1**2 - g1 * g2
    if d2_square >= 0:
        d2 = d2_square.sqrt()
        if x1 <= x2:
            min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
        else:
            min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
        return min(max(min_pos, xmin_bound), xmax_bound)
    else:
        return (xmin_bound + xmax_bound) / 2.0
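
# A minimal, hypothetical sanity check (not part of the original module): for
# f(x) = (x - 1)**2 sampled at x1 = 0 (f1 = 1, g1 = -2) and x2 = 2 (f2 = 1,
# g2 = 2), the cubic fit recovers the true minimizer x = 1. The gradients are
# passed as tensors because the helper calls ``.sqrt()`` on a tensor expression.
#
#   >>> import torch
#   >>> float(_cubic_interpolate(0.0, 1.0, torch.tensor(-2.0), 2.0, 1.0, torch.tensor(2.0)))
#   1.0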


def _strong_wolfe(
    obj_func, x, t, d, f, g, gtd, c1=1e-4, c2=0.9, tolerance_change=1e-9, max_ls=25
):
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    g = g.clone(memory_format=torch.contiguous_format)
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)

    # bracket an interval containing a point satisfying the Wolfe criteria
    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    done = False
    ls_iter = 0
    while ls_iter < max_ls:
        # check conditions
        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]
            bracket_f = [f_new]
            bracket_g = [g_new]
            done = True
            break

        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # interpolate
        min_step = t + 0.01 * (t - t_prev)
        max_step = t * 10
        tmp = t
        t = _cubic_interpolate(
            t_prev, f_prev, gtd_prev, t, f_new, gtd_new, bounds=(min_step, max_step)
        )

        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone(memory_format=torch.contiguous_format)
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

    # reached max number of iterations?
    if ls_iter == max_ls:
        bracket = [0, t]
        bracket_f = [f, f_new]
        bracket_g = [g, g_new]

    # zoom phase: we now have a point satisfying the criteria, or
    # a bracket around it. We refine the bracket until we find the
    # exact point satisfying the criteria
    insuf_progress = False
    # find high and low points in bracket
    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)  # type: ignore[possibly-undefined]
    while not done and ls_iter < max_ls:
        # the line-search bracket has become too small
        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:  # type: ignore[possibly-undefined]
            break

        # compute new trial value
        t = _cubic_interpolate(
            bracket[0],
            bracket_f[0],
            bracket_gtd[0],  # type: ignore[possibly-undefined]
            bracket[1],
            bracket_f[1],
            bracket_gtd[1],
        )

        # test that we are making sufficient progress:
        # if `t` is too close to a boundary, we mark that we are making
        # insufficient progress, and if
        #   + we made insufficient progress in the last step, or
        #   + `t` is at one of the boundaries,
        # we move `t` to a position `0.1 * len(bracket)` away from the
        # nearest boundary point.
        eps = 0.1 * (max(bracket) - min(bracket))
        if min(max(bracket) - t, t - min(bracket)) < eps:
            # interpolation close to boundary
            if insuf_progress or t >= max(bracket) or t <= min(bracket):
                # evaluate at 0.1 away from boundary
                if abs(t - max(bracket)) < abs(t - min(bracket)):
                    t = max(bracket) - eps
                else:
                    t = min(bracket) + eps
                insuf_progress = False
            else:
                insuf_progress = True
        else:
            insuf_progress = False

        # Evaluate new point
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
            # Armijo condition not satisfied or not lower than lowest point
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format)  # type: ignore[possibly-undefined]
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied
                done = True
            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
                # old high becomes new low
                bracket[high_pos] = bracket[low_pos]
                bracket_f[high_pos] = bracket_f[low_pos]
                bracket_g[high_pos] = bracket_g[low_pos]  # type: ignore[possibly-undefined]
                bracket_gtd[high_pos] = bracket_gtd[low_pos]

            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format)  # type: ignore[possibly-undefined]
            bracket_gtd[low_pos] = gtd_new

    # return the best point found
    t = bracket[low_pos]  # type: ignore[possibly-undefined]
    f_new = bracket_f[low_pos]
    g_new = bracket_g[low_pos]  # type: ignore[possibly-undefined]
    return f_new, g_new, t, ls_func_evals
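
# A minimal, hypothetical sanity check (not part of the original module): it
# minimizes f(w) = (w - 3)**2 along the descent direction d = [6.] starting
# from w = 0 (f = 9, gradient = [-6.]) with a trial step of 1. ``obj_func``
# mirrors the (x, t, d) -> (loss, flat_grad) contract used by LBFGS.step below;
# the search finds the exact minimizer step t = 0.5 after two evaluations.
#
#   >>> import torch
#   >>> def obj_func(x, t, d):
#   ...     w = x[0] + t * d
#   ...     return float((w - 3) ** 2), 2 * (w - 3)
#   >>> x0, d, g = [torch.tensor([0.0])], torch.tensor([6.0]), torch.tensor([-6.0])
#   >>> f_new, g_new, t, evals = _strong_wolfe(obj_func, x0, 1.0, d, 9.0, g, g.dot(d))
#   >>> float(f_new), float(t), evals
#   (0.0, 0.5, 2)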


class LBFGS(Optimizer):
    """Implements the L-BFGS algorithm.

    Heavily inspired by `minFunc
    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`_.

    .. warning::
        This optimizer doesn't support per-parameter options and parameter
        groups (there can be only one).

    .. warning::
        Right now all parameters have to be on a single device. This will be
        improved in the future.

    .. note::
        This is a very memory-intensive optimizer (it requires an additional
        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory,
        try reducing the history size or using a different algorithm.

    Args:
        params (iterable): iterable of parameters to optimize. Parameters must be real.
        lr (float): learning rate (default: 1)
        max_iter (int): maximum number of iterations per optimization step
            (default: 20)
        max_eval (int): maximum number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-7).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
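
    Example:
        A minimal usage sketch of the closure-based API; ``model``, ``inputs``,
        ``targets``, and ``loss_fn`` are placeholder names, not objects defined
        in this module::

            >>> optimizer = LBFGS(model.parameters(), lr=1, line_search_fn="strong_wolfe")
            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = loss_fn(model(inputs), targets)
            ...     loss.backward()
            ...     return loss
            >>> optimizer.step(closure)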
  185. """
  186. def __init__(
  187. self,
  188. params: ParamsT,
  189. lr: float = 1,
  190. max_iter: int = 20,
  191. max_eval: Optional[int] = None,
  192. tolerance_grad: float = 1e-7,
  193. tolerance_change: float = 1e-9,
  194. history_size: int = 100,
  195. line_search_fn: Optional[str] = None,
  196. ):
  197. if max_eval is None:
  198. max_eval = max_iter * 5 // 4
  199. defaults = dict(
  200. lr=lr,
  201. max_iter=max_iter,
  202. max_eval=max_eval,
  203. tolerance_grad=tolerance_grad,
  204. tolerance_change=tolerance_change,
  205. history_size=history_size,
  206. line_search_fn=line_search_fn,
  207. )
  208. super().__init__(params, defaults)
  209. if len(self.param_groups) != 1:
  210. raise ValueError(
  211. "LBFGS doesn't support per-parameter options " "(parameter groups)"
  212. )
  213. self._params = self.param_groups[0]["params"]
  214. self._numel_cache = None
  215. def _numel(self):
  216. if self._numel_cache is None:
  217. self._numel_cache = sum(
  218. 2 * p.numel() if torch.is_complex(p) else p.numel()
  219. for p in self._params
  220. )
  221. return self._numel_cache
  222. def _gather_flat_grad(self):
  223. views = []
  224. for p in self._params:
  225. if p.grad is None:
  226. view = p.new(p.numel()).zero_()
  227. elif p.grad.is_sparse:
  228. view = p.grad.to_dense().view(-1)
  229. else:
  230. view = p.grad.view(-1)
  231. if torch.is_complex(view):
  232. view = torch.view_as_real(view).view(-1)
  233. views.append(view)
  234. return torch.cat(views, 0)
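
    # Added note (not in the original source): complex parameters are flattened
    # into interleaved real/imaginary components via ``torch.view_as_real``,
    # which is why ``_numel`` counts each complex element twice. For example:
    #
    #   >>> torch.view_as_real(torch.tensor([1 + 2j])).view(-1)
    #   tensor([1., 2.])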

    def _add_grad(self, step_size, update):
        offset = 0
        for p in self._params:
            if torch.is_complex(p):
                p = torch.view_as_real(p)
            numel = p.numel()
            # view the flat update as `p` to avoid deprecated pointwise semantics
            p.add_(update[offset : offset + numel].view_as(p), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def _clone_param(self):
        return [p.clone(memory_format=torch.contiguous_format) for p in self._params]

    def _set_param(self, params_data):
        for p, pdata in zip(self._params, params_data):
            p.copy_(pdata)

    def _directional_evaluate(self, closure, x, t, d):
        self._add_grad(t, d)
        loss = float(closure())
        flat_grad = self._gather_flat_grad()
        self._set_param(x)
        return loss, flat_grad

    @torch.no_grad()
    def step(self, closure):
        """Perform a single optimization step.

        Args:
            closure (Callable): A closure that reevaluates the model
                and returns the loss.
        """
        assert len(self.param_groups) == 1

        # Make sure the closure is always called with grad enabled
        closure = torch.enable_grad()(closure)

        group = self.param_groups[0]
        lr = group["lr"]
        max_iter = group["max_iter"]
        max_eval = group["max_eval"]
        tolerance_grad = group["tolerance_grad"]
        tolerance_change = group["tolerance_change"]
        line_search_fn = group["line_search_fn"]
        history_size = group["history_size"]

        # NOTE: LBFGS has only global state, but we register it as state for
        # the first param, because this helps with casting in load_state_dict
        state = self.state[self._params[0]]
        state.setdefault("func_evals", 0)
        state.setdefault("n_iter", 0)

        # evaluate initial f(x) and df/dx
        orig_loss = closure()
        loss = float(orig_loss)
        current_evals = 1
        state["func_evals"] += 1

        flat_grad = self._gather_flat_grad()
        opt_cond = flat_grad.abs().max() <= tolerance_grad

        # optimal condition
        if opt_cond:
            return orig_loss

        # tensors cached in state (for tracing)
        d = state.get("d")
        t = state.get("t")
        old_dirs = state.get("old_dirs")
        old_stps = state.get("old_stps")
        ro = state.get("ro")
        H_diag = state.get("H_diag")
        prev_flat_grad = state.get("prev_flat_grad")
        prev_loss = state.get("prev_loss")

        n_iter = 0
        # optimize for a max of max_iter iterations
        while n_iter < max_iter:
            # keep track of the number of iterations
            n_iter += 1
            state["n_iter"] += 1

            ############################################################
            # compute gradient descent direction
            ############################################################
            if state["n_iter"] == 1:
                d = flat_grad.neg()
                old_dirs = []
                old_stps = []
                ro = []
                H_diag = 1
            else:
                # do lbfgs update (update memory)
                y = flat_grad.sub(prev_flat_grad)
                s = d.mul(t)
                ys = y.dot(s)  # y*s
                if ys > 1e-10:
                    # updating memory
                    if len(old_dirs) == history_size:
                        # shift history by one (limited-memory)
                        old_dirs.pop(0)
                        old_stps.pop(0)
                        ro.pop(0)

                    # store new direction/step
                    old_dirs.append(y)
                    old_stps.append(s)
                    ro.append(1.0 / ys)

                    # update scale of initial Hessian approximation
                    H_diag = ys / y.dot(y)  # (y*y)

                # compute the approximate (L-BFGS) inverse Hessian
                # multiplied by the gradient
                num_old = len(old_dirs)

                if "al" not in state:
                    state["al"] = [None] * history_size
                al = state["al"]

                # iteration in L-BFGS loop collapsed to use just one buffer
                q = flat_grad.neg()
                for i in range(num_old - 1, -1, -1):
                    al[i] = old_stps[i].dot(q) * ro[i]
                    q.add_(old_dirs[i], alpha=-al[i])

                # multiply by initial Hessian
                # r/d is the final direction
                d = r = torch.mul(q, H_diag)
                for i in range(num_old):
                    be_i = old_dirs[i].dot(r) * ro[i]
                    r.add_(old_stps[i], alpha=al[i] - be_i)
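                # Note (added commentary, not in the original source): the two
                # loops above are the standard L-BFGS two-loop recursion. With
                # q initialized to the negative gradient,
                #   first loop:  al[i] = ro[i] * s_i^T q;   q <- q - al[i] * y_i
                #   scaling:     r <- H_diag * q, where H_diag = (s^T y) / (y^T y)
                #   second loop: be_i = ro[i] * y_i^T r;    r <- r + (al[i] - be_i) * s_i
                # so d = r approximates -H^{-1} * gradient, i.e. a descent
                # direction preconditioned by the L-BFGS inverse Hessian estimate.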

            if prev_flat_grad is None:
                prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format)
            else:
                prev_flat_grad.copy_(flat_grad)
            prev_loss = loss

            ############################################################
            # compute step length
            ############################################################
            # reset initial guess for step size
            if state["n_iter"] == 1:
                t = min(1.0, 1.0 / flat_grad.abs().sum()) * lr
            else:
                t = lr

            # directional derivative
            gtd = flat_grad.dot(d)  # g * d

            # directional derivative is below tolerance
            if gtd > -tolerance_change:
                break

            # optional line search: user function
            ls_func_evals = 0
            if line_search_fn is not None:
                # perform line search, using user function
                if line_search_fn != "strong_wolfe":
                    raise RuntimeError("only 'strong_wolfe' is supported")
                else:
                    x_init = self._clone_param()

                    def obj_func(x, t, d):
                        return self._directional_evaluate(closure, x, t, d)

                    loss, flat_grad, t, ls_func_evals = _strong_wolfe(
                        obj_func, x_init, t, d, loss, flat_grad, gtd
                    )
                self._add_grad(t, d)
                opt_cond = flat_grad.abs().max() <= tolerance_grad
            else:
                # no line search, simply move with a fixed step
                self._add_grad(t, d)
                if n_iter != max_iter:
                    # re-evaluate the function only if we are not in the last
                    # iteration: in a stochastic setting there is no point
                    # re-evaluating it here
                    with torch.enable_grad():
                        loss = float(closure())
                    flat_grad = self._gather_flat_grad()
                    opt_cond = flat_grad.abs().max() <= tolerance_grad
                    ls_func_evals = 1

            # update func eval
            current_evals += ls_func_evals
            state["func_evals"] += ls_func_evals

            ############################################################
            # check conditions
            ############################################################
            if n_iter == max_iter:
                break

            if current_evals >= max_eval:
                break

            # optimal condition
            if opt_cond:
                break

            # lack of progress
            if d.mul(t).abs().max() <= tolerance_change:
                break

            if abs(loss - prev_loss) < tolerance_change:
                break

        state["d"] = d
        state["t"] = t
        state["old_dirs"] = old_dirs
        state["old_stps"] = old_stps
        state["ro"] = ro
        state["H_diag"] = H_diag
        state["prev_flat_grad"] = prev_flat_grad
        state["prev_loss"] = prev_loss

        return orig_loss
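

# A self-contained usage sketch (kept as a comment so importing this module is
# unaffected); it uses the public ``torch.optim.LBFGS`` entry point to drive the
# 2-D Rosenbrock function toward its minimum at (1, 1), starting from the origin:
#
#   import torch
#
#   xy = torch.zeros(2, requires_grad=True)
#   opt = torch.optim.LBFGS([xy], lr=1, max_iter=100, line_search_fn="strong_wolfe")
#
#   def closure():
#       opt.zero_grad()
#       x, y = xy[0], xy[1]
#       loss = (1 - x) ** 2 + 100 * (y - x**2) ** 2
#       loss.backward()
#       return loss
#
#   opt.step(closure)
#   # xy should now be close to tensor([1., 1.])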