# mypy: allow-untyped-defs
from typing import List, Tuple

import torch
from torch import Tensor

from . import _functional as F
from .optimizer import _maximize_doc, Optimizer, ParamsT

__all__ = ["SparseAdam"]


class SparseAdam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        maximize: bool = False,
    ):
        if not 0.0 < lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")

        defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize)
        super().__init__(params, defaults)

        sparse_params = []
        complex_params = []
        for index, param_group in enumerate(self.param_groups):
            assert isinstance(
                param_group, dict
            ), f"param_groups must be a list of dicts, but got {type(param_group)}"
            # given param group, convert given params to a list first before iterating
            for d_index, d_param in enumerate(param_group["params"]):
                if d_param.is_sparse:
                    sparse_params.append([index, d_index])
                if d_param.is_complex():
                    complex_params.append([index, d_index])
        if sparse_params:
            raise ValueError(
                f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors"
            )
        if complex_params:
            raise ValueError(
                f"Complex params at indices {complex_params}: SparseAdam does not support complex parameters"
            )

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            state_steps: List[int] = []
            beta1, beta2 = group["betas"]
            maximize = group.get("maximize", False)

            for p in group["params"]:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if not p.grad.is_sparse:
                        raise RuntimeError(
                            "SparseAdam does not support dense gradients, please consider Adam instead"
                        )
                    grads.append(p.grad)

                    state = self.state[p]

                    # State initialization
                    if len(state) == 0:
                        state["step"] = 0
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )
                        # Exponential moving average of squared gradient values
                        state["exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )

                    exp_avgs.append(state["exp_avg"])
                    exp_avg_sqs.append(state["exp_avg_sq"])

                    # update the steps for each param group update
                    state["step"] += 1
                    # record the step after step update
                    state_steps.append(state["step"])

            F.sparse_adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                eps=group["eps"],
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                maximize=maximize,
            )

        return loss


SparseAdam.__doc__ = rf"""SparseAdam implements a masked version of the Adam algorithm
    suitable for sparse gradients. Currently, due to implementation constraints (explained
    below), SparseAdam is only intended for a narrow subset of use cases, specifically
    parameters of a dense layout with gradients of a sparse layout. This occurs in a
    special case where the module backwards produces grads already in a sparse layout.
    One example NN module that behaves as such is ``nn.Embedding(sparse=True)``.

    SparseAdam approximates the Adam algorithm by masking out the parameter and moment
    updates corresponding to the zero values in the gradients. Whereas the Adam algorithm
    will update the first moment, the second moment, and the parameters based on all values
    of the gradients, SparseAdam only updates the moments and parameters corresponding
    to the non-zero values of the gradients.
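
    For example, a rough usage sketch with ``nn.Embedding(sparse=True)`` might look
    like the following (the module sizes and hyperparameters here are illustrative
    placeholders, not recommendations)::

        import torch
        import torch.nn as nn

        emb = nn.Embedding(10, 3, sparse=True)  # backward produces sparse-layout grads
        optimizer = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)

        tokens = torch.tensor([1, 4, 4, 7])
        loss = emb(tokens).pow(2).sum()
        optimizer.zero_grad()
        loss.backward()   # emb.weight.grad now has sparse layout
        optimizer.step()  # only the rows touched by tokens are updated
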
    A simplified way of thinking about the `intended` implementation is as such:

    1. Create a mask of the non-zero values in the sparse gradients. For example,
       if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1].
    2. Apply this mask over the running moments and do computation on only the
       non-zero values.
    3. Apply this mask over the parameters and only apply an update on non-zero values.
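
    As a rough dense-tensor sketch of those three steps (illustrative only; the actual
    implementation operates directly on sparse layout tensors and never materializes
    the zeros, and bias correction is omitted here for brevity)::

        import torch

        beta1, beta2, lr, eps = 0.9, 0.999, 1e-3, 1e-8
        param = torch.zeros(5)
        exp_avg = torch.zeros(5)
        exp_avg_sq = torch.zeros(5)

        grad = torch.tensor([0., 5., 0., 0., 9.])
        mask = grad != 0  # step 1: [False, True, False, False, True]

        # step 2: update the running moments only at the masked positions
        exp_avg[mask] = beta1 * exp_avg[mask] + (1 - beta1) * grad[mask]
        exp_avg_sq[mask] = beta2 * exp_avg_sq[mask] + (1 - beta2) * grad[mask] ** 2

        # step 3: update the parameters only at the masked positions
        param[mask] -= lr * exp_avg[mask] / (exp_avg_sq[mask].sqrt() + eps)
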
    In actuality, we use sparse layout Tensors to optimize this approximation, which means the
    more gradients that are masked by not being materialized, the more performant the optimization.
    Since we rely on using sparse layout tensors, we infer that any materialized value in the
    sparse layout is non-zero and we do NOT actually verify that all values are not zero!

    It is important not to conflate a semantically sparse tensor (a tensor where many
    of its values are zeros) with a sparse layout tensor (a tensor where ``.is_sparse``
    returns ``True``). The SparseAdam approximation is intended for `semantically` sparse
    tensors and the sparse layout is only an implementation detail. A clearer implementation
    would be to use MaskedTensors, but those are experimental.
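
    For instance, one way to see the distinction (an illustrative snippet, not part of
    the optimizer itself)::

        import torch

        g = torch.tensor([0., 5., 0., 0., 9.])  # semantically sparse, dense layout
        g.is_sparse              # False
        g.to_sparse().is_sparse  # True; only the two non-zero values are materialized
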
    .. note::

        If you suspect your gradients are semantically sparse (but do not have sparse
        layout), this variant may not be the best for you. Ideally, you want to avoid
        materializing anything that is suspected to be sparse in the first place, since
        needing to convert all your grads from dense layout to sparse layout may outweigh
        the performance gain. Here, using Adam may be the best alternative, unless you
        can easily rig up your module to output sparse grads similar to
        ``nn.Embedding(sparse=True)``. If you insist on converting your grads, you can do
        so by manually overriding your parameters' ``.grad`` fields with their sparse
        equivalents before calling ``.step()``.
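
        A minimal sketch of that conversion (``model`` and ``optimizer`` are
        placeholders for your own module and SparseAdam instance)::

            for p in model.parameters():
                if p.grad is not None and not p.grad.is_sparse:
                    p.grad = p.grad.to_sparse()
            optimizer.step()
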
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        {_maximize_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """