# pytorch_utils.py

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import inspect
from typing import Callable, List, Optional, Set, Tuple, Union

import torch
from packaging import version
from safetensors.torch import storage_ptr, storage_size
from torch import nn

from .utils import is_torch_xla_available, logging


ALL_LAYERNORM_LAYERS = [nn.LayerNorm]

logger = logging.get_logger(__name__)

parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)

is_torch_greater_or_equal_than_2_4 = parsed_torch_version_base >= version.parse("2.4")
is_torch_greater_or_equal_than_2_3 = parsed_torch_version_base >= version.parse("2.3")
is_torch_greater_or_equal_than_2_2 = parsed_torch_version_base >= version.parse("2.2")
is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1")
is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0")
is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13")
is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12")


def softmax_backward_data(parent, grad_output, output, dim, self):
    """
    A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according
    to the torch version detected.
    """
    from torch import _softmax_backward_data

    return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)


def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`torch.nn.Linear`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.
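
    Example (an illustrative usage sketch, not part of the original docstring; `nn.Linear`
    stores its weight as `(out_features, in_features)`):

    ```python
    layer = nn.Linear(4, 6)
    # keep output rows 0, 1 and 3 -> the pruned layer maps 4 features to 3
    pruned = prune_linear_layer(layer, torch.LongTensor([0, 1, 3]), dim=0)
    assert pruned.weight.shape == (3, 4)
    ```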
  49. """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
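
    Example (an illustrative usage sketch, not part of the original docstring):

    ```python
    conv = Conv1D(nf=8, nx=4)  # maps the last dimension from 4 to 8 features
    x = torch.randn(2, 10, 4)
    assert conv(x).shape == (2, 10, 8)
    ```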
  75. """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        self.nx = nx
        self.weight = nn.Parameter(torch.empty(nx, nf))
        self.bias = nn.Parameter(torch.zeros(nf))
        nn.init.normal_(self.weight, std=0.02)

    def __repr__(self) -> str:
        return "Conv1D(nf={nf}, nx={nx})".format(**self.__dict__)

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(size_out)
        return x


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D layer works like a Linear layer (see e.g. BERT) but
    the weights are transposed.

    Used to remove heads.

    Args:
        layer ([`~pytorch_utils.Conv1D`]): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.

    Returns:
        [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
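
    Example (an illustrative usage sketch, not part of the original docstring; the `Conv1D`
    weight is stored transposed as `(nx, nf)`, so `dim=1` prunes output features):

    ```python
    conv = Conv1D(nf=8, nx=4)
    pruned = prune_conv1d_layer(conv, torch.LongTensor([0, 2, 5]), dim=1)
    assert pruned.weight.shape == (4, 3)
    ```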
  101. """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer


def prune_layer(
    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
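
    Example (an illustrative usage sketch, not part of the original docstring):

    ```python
    # dispatches on the layer type; `dim` defaults to 0 for `nn.Linear` and 1 for `Conv1D`
    pruned = prune_layer(nn.Linear(4, 6), torch.LongTensor([0, 1, 3]))
    assert isinstance(pruned, nn.Linear) and pruned.weight.shape == (3, 4)
    ```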
  130. """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor],
    chunk_size: int,
    chunk_dim: int,
    *input_tensors,
) -> torch.Tensor:
    """
    This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
    `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

    If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
    applying `forward_fn` to `input_tensors`.

    Args:
        forward_fn (`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (`int`):
            The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (`int`):
            The dimension over which the `input_tensors` should be chunked.
        input_tensors (`Tuple[torch.Tensor]`):
            The input tensors of `forward_fn` which will be chunked.

    Returns:
        `torch.Tensor`: A tensor with the same shape as the one `forward_fn` would have given if applied directly.

    Examples:

    ```python
    # rename the usual forward() fn to forward_chunk()
    def forward_chunk(self, hidden_states):
        hidden_states = self.decoder(hidden_states)
        return hidden_states


    # implement a chunked forward function
    def forward(self, hidden_states):
        return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    ```"""
    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"

    # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        tensor_shape = input_tensors[0].shape[chunk_dim]
        for input_tensor in input_tensors:
            if input_tensor.shape[chunk_dim] != tensor_shape:
                raise ValueError(
                    f"All input tensors have to be of the same shape: {tensor_shape}, "
                    f"found shape {input_tensor.shape[chunk_dim]}"
                )

        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)


def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking `already_pruned_heads` into account.

    Args:
        heads (`List[int]`): List of the indices of heads to prune.
        n_heads (`int`): The number of heads in the model.
        head_size (`int`): The size of each head.
        already_pruned_heads (`Set[int]`): A set of already pruned heads.

    Returns:
        `Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads`
        into account and the indices of rows/columns to keep in the layer weight.
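
    Example (an illustrative sketch, not part of the original docstring; head 0 was already
    pruned, so head 1 now occupies the first block of rows and rows 2-7 are kept):

    ```python
    heads, index = find_pruneable_heads_and_indices([1], n_heads=4, head_size=2, already_pruned_heads={0})
    assert heads == {1}
    assert index.tolist() == [2, 3, 4, 5, 6, 7]
    ```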
  211. """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index


def meshgrid(
    *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None
) -> Tuple[torch.Tensor, ...]:
    """
    Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument.

    Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html
    """
    return torch.meshgrid(*tensors, indexing=indexing)


def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
    """
    Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
    example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
    guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
    non-overlapping lifetimes may have the same id.
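
    Example (an illustrative sketch, not part of the original docstring):

    ```python
    t = torch.zeros(8)
    view = t[:4]  # a view shares the underlying storage with `t`
    assert id_tensor_storage(t) == id_tensor_storage(view)
    ```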
  235. """
    if tensor.device.type == "xla" and is_torch_xla_available():
        # NOTE: XLA tensors don't have storage; use another unique id to distinguish them.
        # This is an XLA tensor, so it must have been created on a torch_xla device,
        # which makes the following import safe:
        import torch_xla

        unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
    else:
        unique_id = storage_ptr(tensor)

    return tensor.device, unique_id, storage_size(tensor)


def isin_mps_friendly(elements: torch.Tensor, test_elements: torch.Tensor | int) -> torch.Tensor:
    """
    Same as `torch.isin` without flags, but MPS-friendly. We can remove this function when we stop supporting
    torch <= 2.3. See https://github.com/pytorch/pytorch/issues/77764#issuecomment-2067838075

    Args:
        elements (`torch.Tensor`): Input elements.
        test_elements (`torch.Tensor` or `int`): The elements to check against.

    Returns:
        `torch.Tensor`: A boolean tensor of the same shape as `elements` that is True for `elements` in `test_elements`
        and False otherwise.
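
    Example (an illustrative sketch, not part of the original docstring):

    ```python
    elements = torch.tensor([0, 1, 2, 3])
    mask = isin_mps_friendly(elements, torch.tensor([1, 3]))
    assert mask.tolist() == [False, True, False, True]
    ```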
  256. """
    if elements.device.type == "mps" and not is_torch_greater_or_equal_than_2_4:
        test_elements = torch.tensor(test_elements)
        if test_elements.ndim == 0:
            test_elements = test_elements.unsqueeze(0)
        return elements.tile(test_elements.shape[0], 1).eq(test_elements.unsqueeze(1)).sum(dim=0).bool().squeeze()
    else:
        # Note: don't use named arguments in `torch.isin`, see https://github.com/pytorch/pytorch/issues/126045
        return torch.isin(elements, test_elements)