trainer_pt_utils.py

# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Torch utilities for the Trainer class.
"""

import copy
import datetime
import io
import json
import math
import os
import sys
import warnings
from collections.abc import Mapping
from contextlib import contextmanager
from dataclasses import dataclass, field
from itertools import chain
from logging import StreamHandler
from typing import Any, Dict, Iterator, List, Optional, Union

import numpy as np
import torch
import torch.distributed as dist
from torch import nn
from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler
from torch.utils.data.distributed import DistributedSampler

from .integrations.deepspeed import is_deepspeed_zero3_enabled
from .tokenization_utils_base import BatchEncoding
from .utils import (
    is_sagemaker_mp_enabled,
    is_torch_available,
    is_torch_xla_available,
    is_training_run_on_sagemaker,
    logging,
)


if is_training_run_on_sagemaker():
    logging.add_handler(StreamHandler(sys.stdout))

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

if is_torch_available():
    from .pytorch_utils import is_torch_greater_or_equal_than_2_0

    if is_torch_greater_or_equal_than_2_0:
        from torch.optim.lr_scheduler import LRScheduler
    else:
        from torch.optim.lr_scheduler import _LRScheduler as LRScheduler


logger = logging.get_logger(__name__)


def get_dataloader_sampler(dataloader):
    if hasattr(dataloader, "batch_sampler") and dataloader.batch_sampler is not None:
        return get_dataloader_sampler(dataloader.batch_sampler)
    elif hasattr(dataloader, "sampler"):
        return dataloader.sampler


def atleast_1d(tensor_or_array: Union[torch.Tensor, np.ndarray]):
    if isinstance(tensor_or_array, torch.Tensor):
        if hasattr(torch, "atleast_1d"):
            tensor_or_array = torch.atleast_1d(tensor_or_array)
        elif tensor_or_array.ndim < 1:
            tensor_or_array = tensor_or_array[None]
    else:
        tensor_or_array = np.atleast_1d(tensor_or_array)
    return tensor_or_array


def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100):
    """Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
    tensor1 = atleast_1d(tensor1)
    tensor2 = atleast_1d(tensor2)

    if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
        return torch.cat((tensor1, tensor2), dim=0)

    # Let's figure out the new shape
    new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tensor1.shape[2:]

    # Now let's fill the result tensor
    result = tensor1.new_full(new_shape, padding_index)
    result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
    result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2
    return result


def numpy_pad_and_concatenate(array1, array2, padding_index=-100):
    """Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary."""
    array1 = atleast_1d(array1)
    array2 = atleast_1d(array2)

    if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:
        return np.concatenate((array1, array2), axis=0)

    # Let's figure out the new shape
    new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]

    # Now let's fill the result tensor
    result = np.full_like(array1, padding_index, shape=new_shape)
    result[: array1.shape[0], : array1.shape[1]] = array1
    result[array1.shape[0] :, : array2.shape[1]] = array2
    return result


def nested_concat(tensors, new_tensors, padding_index=-100):
    """
    Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
    nested list/tuples/dict of tensors.
    """
    if not (isinstance(tensors, torch.Tensor) and isinstance(new_tensors, torch.Tensor)):
        assert (
            type(tensors) is type(new_tensors)
        ), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors))
    elif isinstance(tensors, torch.Tensor):
        return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
    elif isinstance(tensors, Mapping):
        return type(tensors)(
            {k: nested_concat(t, new_tensors[k], padding_index=padding_index) for k, t in tensors.items()}
        )
    elif isinstance(tensors, np.ndarray):
        return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index)
    else:
        raise TypeError(f"Unsupported type for concatenation: got {type(tensors)}")
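

# Illustrative sketch (not part of the original module): `nested_concat` pads mismatched
# second dimensions with `padding_index` before stacking on the first dimension.
#
#   >>> a = torch.ones(2, 3, dtype=torch.long)
#   >>> b = torch.ones(2, 5, dtype=torch.long)
#   >>> nested_concat(a, b, padding_index=-100).shape
#   torch.Size([4, 5])
#
# Rows coming from `a` have their last two columns filled with -100.

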
def find_batch_size(tensors):
    """
    Find the first dimension of a tensor in a nested list/tuple/dict of tensors.
    """
    if isinstance(tensors, (list, tuple)):
        for t in tensors:
            result = find_batch_size(t)
            if result is not None:
                return result
    elif isinstance(tensors, Mapping):
        for key, value in tensors.items():
            result = find_batch_size(value)
            if result is not None:
                return result
    elif isinstance(tensors, torch.Tensor):
        return tensors.shape[0] if len(tensors.shape) >= 1 else None
    elif isinstance(tensors, np.ndarray):
        return tensors.shape[0] if len(tensors.shape) >= 1 else None


def nested_numpify(tensors):
    "Numpify `tensors` (even if it's a nested list/tuple/dict of tensors)."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_numpify(t) for t in tensors)
    if isinstance(tensors, Mapping):
        return type(tensors)({k: nested_numpify(t) for k, t in tensors.items()})

    t = tensors.cpu()
    if t.dtype == torch.bfloat16:
        # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
        # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
        # Until Numpy adds bfloat16, we must convert to float32.
        t = t.to(torch.float32)
    return t.numpy()


def nested_detach(tensors):
    "Detach `tensors` (even if it's a nested list/tuple/dict of tensors)."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_detach(t) for t in tensors)
    elif isinstance(tensors, Mapping):
        return type(tensors)({k: nested_detach(t) for k, t in tensors.items()})
    return tensors.detach() if isinstance(tensors, torch.Tensor) else tensors


def nested_xla_mesh_reduce(tensors, name):
    if is_torch_xla_available():
        import torch_xla.core.xla_model as xm

        if isinstance(tensors, (list, tuple)):
            return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors))
        if isinstance(tensors, Mapping):
            return type(tensors)(
                {k: nested_xla_mesh_reduce(t, f"{name}_{i}") for i, (k, t) in enumerate(tensors.items())}
            )

        tensors = atleast_1d(tensors)
        return xm.mesh_reduce(name, tensors, torch.cat)
    else:
        raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`")


def distributed_concat(tensor: Any, num_total_examples: Optional[int] = None) -> Any:
    try:
        if isinstance(tensor, (tuple, list)):
            return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor)
        if isinstance(tensor, Mapping):
            return type(tensor)({k: distributed_concat(t, num_total_examples) for k, t in tensor.items()})
        tensor = atleast_1d(tensor).contiguous()
        output_tensors = [tensor.clone() for _ in range(dist.get_world_size())]
        dist.all_gather(output_tensors, tensor)
        concat = torch.cat(output_tensors, dim=0)

        # truncate the dummy elements added by SequentialDistributedSampler
        if num_total_examples is not None:
            concat = concat[:num_total_examples]
        return concat
    except AssertionError:
        raise AssertionError("Not currently using distributed training")


def distributed_broadcast_scalars(
    scalars: List[Union[int, float]],
    num_total_examples: Optional[int] = None,
    device: Optional[torch.device] = torch.device("cuda"),
) -> torch.Tensor:
    try:
        tensorized_scalar = torch.tensor(scalars).to(device)
        output_tensors = [tensorized_scalar.clone() for _ in range(dist.get_world_size())]
        dist.all_gather(output_tensors, tensorized_scalar)
        concat = torch.cat(output_tensors, dim=0)

        # truncate the dummy elements added by SequentialDistributedSampler
        if num_total_examples is not None:
            concat = concat[:num_total_examples]
        return concat
    except AssertionError:
        raise AssertionError("Not currently using distributed training")


def reissue_pt_warnings(caught_warnings):
    # Reissue warnings
    if len(caught_warnings) > 1:
        for w in caught_warnings:
            if w.category is not UserWarning:
                warnings.warn(w.message, w.category)


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Args:
        local_rank (`int`): The rank of the local process.
    """
    if local_rank not in [-1, 0]:
        dist.barrier()
    yield
    if local_rank == 0:
        dist.barrier()


class DistributedSamplerWithLoop(DistributedSampler):
    """
    Like a `torch.utils.data.distributed.DistributedSampler` but loops at the end back to the beginning of the
    shuffled samples to make each process have a round multiple of batch_size samples.

    Args:
        dataset (`torch.utils.data.Dataset`):
            Dataset used for sampling.
        batch_size (`int`):
            The batch size used with this sampler.
        kwargs (`Dict[str, Any]`, *optional*):
            All other keyword arguments passed to `DistributedSampler`.
    """

    def __init__(self, dataset, batch_size, **kwargs):
        super().__init__(dataset, **kwargs)
        self.batch_size = batch_size

    def __iter__(self):
        indices = list(super().__iter__())
        remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size
        # DistributedSampler already added samples from the beginning to make the number of samples a round multiple
        # of the world size, so we skip those.
        start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0
        indices += indices[start_remainder : start_remainder + remainder]
        return iter(indices)


class EvalLoopContainer:
    """
    Container to store intermediate results of an evaluation loop.

    Args:
        do_nested_concat (`bool`, *optional*, defaults to `True`):
            If set to `True`, each iteration will recursively concatenate a new object containing tensors to
            the existing stored tensors, provided that the structure of the existing object and the new one
            are identical. If set to `False`, all newly added tensors will be stored in a list.
        padding_index (`int`, *optional*, defaults to -100):
            Value used to pad tensors of different shapes when `do_nested_concat=True`.
    """

    def __init__(self, do_nested_concat: bool = True, padding_index: int = -100):
        self.do_nested_concat = do_nested_concat
        self.padding_index = padding_index
        self.tensors = None
        self.arrays = None

    def add(self, tensors) -> None:
        """Add tensors to the stored objects. If `do_nested_concat=True`, the tensors will be concatenated recursively."""
        if self.tensors is None:
            self.tensors = tensors if self.do_nested_concat else [tensors]
        elif self.do_nested_concat:
            self.tensors = nested_concat(self.tensors, tensors, padding_index=self.padding_index)
        else:
            self.tensors.append(tensors)

    def to_cpu_and_numpy(self) -> None:
        """Move tensors in stored objects to CPU and convert them to numpy arrays."""
        # Check if we have something to add, if not just return
        if self.tensors is None:
            return

        new_arrays = nested_numpify(self.tensors)
        if self.arrays is None:
            self.arrays = new_arrays
        elif self.do_nested_concat:
            self.arrays = nested_concat(self.arrays, new_arrays, padding_index=self.padding_index)
        else:
            self.arrays.extend(new_arrays)

        # reset device tensors after adding to cpu
        self.tensors = None

    def get_arrays(self):
        """Returns the numpified and moved to CPU stored objects."""
        self.to_cpu_and_numpy()
        return self.arrays
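

# Illustrative sketch (not part of the original module): a typical eval loop pushes
# per-batch logits into an `EvalLoopContainer`, offloads them to CPU every few steps,
# and reads back one padded numpy array at the end.
#
#   >>> container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
#   >>> container.add(torch.ones(4, 7))   # batch 1, sequence length 7
#   >>> container.add(torch.ones(4, 9))   # batch 2, sequence length 9 (shorter rows get padded)
#   >>> container.to_cpu_and_numpy()      # optional intermediate offload to CPU
#   >>> container.get_arrays().shape
#   (8, 9)

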
class SequentialDistributedSampler(Sampler):
    """
    Distributed Sampler that subsamples indices sequentially, making it easier to collate all results at the end.

    Even though we only use this sampler for eval and predict (no training), which means that the model params won't
    have to be synced (i.e. will not hang for synchronization even if varied number of forward passes), we still add
    extra samples to the sampler to make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather`
    or `reduce` resulting tensors at the end of the loop.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None):
        warnings.warn(
            "SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        num_samples = len(self.dataset)
        # Add extra samples to make num_samples a multiple of batch_size if passed
        if batch_size is not None:
            self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size
        else:
            self.num_samples = int(math.ceil(num_samples / num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.batch_size = batch_size

    def __iter__(self):
        indices = list(range(len(self.dataset)))

        # add extra samples to make it evenly divisible
        indices += indices[: (self.total_size - len(indices))]
        assert (
            len(indices) == self.total_size
        ), f"Indices length {len(indices)} and total size {self.total_size} mismatched"

        # subsample
        indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
        assert (
            len(indices) == self.num_samples
        ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched"

        return iter(indices)

    def __len__(self):
        return self.num_samples


def get_tpu_sampler(dataset: torch.utils.data.Dataset, batch_size: int):
    if xm.xrt_world_size() <= 1:
        return RandomSampler(dataset)
    return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())


def nested_new_like(arrays, num_samples, padding_index=-100):
    """Create the same nested structure as `arrays` with a first dimension always at `num_samples`."""
    if isinstance(arrays, (list, tuple)):
        return type(arrays)(nested_new_like(x, num_samples) for x in arrays)
    return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:]))


def expand_like(arrays, new_seq_length, padding_index=-100):
    """Expand the `arrays` so that the second dimension grows to `new_seq_length`. Uses `padding_index` for padding."""
    result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:])
    result[:, : arrays.shape[1]] = arrays
    return result


def nested_truncate(tensors, limit):
    "Truncate `tensors` at `limit` (even if it's a nested list/tuple/dict of tensors)."
    if isinstance(tensors, (list, tuple)):
        return type(tensors)(nested_truncate(t, limit) for t in tensors)
    if isinstance(tensors, Mapping):
        return type(tensors)({k: nested_truncate(t, limit) for k, t in tensors.items()})
    return tensors[:limit]


class DistributedTensorGatherer:
    """
    A class responsible for properly gathering tensors (or nested list/tuple of tensors) on the CPU by chunks.

    If our dataset has 16 samples with a batch size of 2 on 3 processes and we gather then transfer on CPU at every
    step, our sampler will generate the following indices:

        `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1]`

    to get something of size a multiple of 3 (so that each process gets the same dataset length). Then process 0, 1 and
    2 will be responsible of making predictions for the following samples:

    - P0: `[0, 1, 2, 3, 4, 5]`
    - P1: `[6, 7, 8, 9, 10, 11]`
    - P2: `[12, 13, 14, 15, 0, 1]`

    The first batch treated on each process will be:

    - P0: `[0, 1]`
    - P1: `[6, 7]`
    - P2: `[12, 13]`

    So if we gather at the end of the first batch, we will get a tensor (nested list/tuple of tensor) corresponding to
    the following indices:

        `[0, 1, 6, 7, 12, 13]`

    If we directly concatenate our results without taking any precautions, the user will then get the predictions for
    the indices in this order at the end of the prediction loop:

        `[0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]`

    which is not what the user expects. This class is there to solve that problem.

    Args:
        world_size (`int`):
            The number of processes used in the distributed training.
        num_samples (`int`):
            The number of samples in our dataset.
        make_multiple_of (`int`, *optional*):
            If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument
            (by adding samples).
        padding_index (`int`, *optional*, defaults to -100):
            The padding index to use if the arrays don't all have the same sequence length.
    """

    def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100):
        warnings.warn(
            "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.world_size = world_size
        self.num_samples = num_samples
        total_size = world_size if make_multiple_of is None else world_size * make_multiple_of
        self.total_samples = int(np.ceil(num_samples / total_size)) * total_size
        self.process_length = self.total_samples // world_size
        self._storage = None
        self._offsets = None
        self.padding_index = padding_index

    def add_arrays(self, arrays):
        """
        Add `arrays` to the internal storage. Will initialize the storage to the full size at the first arrays passed
        so that if we're bound to get an OOM, it happens at the beginning.
        """
        if arrays is None:
            return
        if self._storage is None:
            self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index)
            self._offsets = list(range(0, self.total_samples, self.process_length))

        slice_len, self._storage = self._nested_set_tensors(self._storage, arrays)
        for i in range(self.world_size):
            self._offsets[i] += slice_len

    def _nested_set_tensors(self, storage, arrays):
        if isinstance(arrays, (list, tuple)):
            result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)]
            return result[0][0], type(arrays)(r[1] for r in result)
        assert (
            arrays.shape[0] % self.world_size == 0
        ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}."

        slice_len = arrays.shape[0] // self.world_size
        for i in range(self.world_size):
            if len(arrays.shape) == 1:
                storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len]
            else:
                # Expand the array on the fly if needed.
                if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]:
                    storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index)
                storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[
                    i * slice_len : (i + 1) * slice_len
                ]
        return slice_len, storage

    def finalize(self):
        """
        Return the properly gathered arrays and truncate to the number of samples (since the sampler added some extras
        to get each process a dataset of the same length).
        """
        if self._storage is None:
            return
        if self._offsets[0] != self.process_length:
            logger.warning("Not all data has been set. Are you sure you passed all values?")
        return nested_truncate(self._storage, self.num_samples)
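

# Illustrative sketch (not part of the original module): each process calls `add_arrays`
# with its locally gathered chunk at every step, and `finalize` returns the re-interleaved,
# truncated result. Assuming 3 processes and 16 samples as in the docstring above, and a
# hypothetical iterable `all_gathered_chunks` of per-step gathered numpy arrays:
#
#   >>> gatherer = DistributedTensorGatherer(world_size=3, num_samples=16)
#   >>> for gathered_step_predictions in all_gathered_chunks:  # hypothetical iterable
#   ...     gatherer.add_arrays(gathered_step_predictions)
#   >>> predictions = gatherer.finalize()  # numpy array of length 16, in the original order

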
@dataclass
class LabelSmoother:
    """
    Adds label-smoothing on a pre-computed output from a Transformers model.

    Args:
        epsilon (`float`, *optional*, defaults to 0.1):
            The label smoothing factor.
        ignore_index (`int`, *optional*, defaults to -100):
            The index in the labels to ignore when computing the loss.
    """

    epsilon: float = 0.1
    ignore_index: int = -100

    def __call__(self, model_output, labels, shift_labels=False):
        logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0]
        if shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        log_probs = -nn.functional.log_softmax(logits, dim=-1)
        if labels.dim() == log_probs.dim() - 1:
            labels = labels.unsqueeze(-1)

        padding_mask = labels.eq(self.ignore_index)
        # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask
        # will ignore them in any case.
        labels = torch.clamp(labels, min=0)
        nll_loss = log_probs.gather(dim=-1, index=labels)
        # works for fp16 input tensor too, by internally upcasting it to fp32
        smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32)

        nll_loss.masked_fill_(padding_mask, 0.0)
        smoothed_loss.masked_fill_(padding_mask, 0.0)

        # Take the mean over the label dimensions, then divide by the number of active elements (i.e. not-padded):
        num_active_elements = padding_mask.numel() - padding_mask.long().sum()
        nll_loss = nll_loss.sum() / num_active_elements
        smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])
        return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss
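

# Illustrative sketch (not part of the original module): applying label smoothing to a
# causal-LM style output, where `model_output` is any dict with a "logits" key.
#
#   >>> smoother = LabelSmoother(epsilon=0.1)
#   >>> logits = torch.randn(2, 6, 32)         # (batch, sequence, vocab)
#   >>> labels = torch.randint(0, 32, (2, 6))
#   >>> labels[:, -2:] = -100                  # positions to ignore
#   >>> loss = smoother({"logits": logits}, labels, shift_labels=True)
#   >>> loss.shape
#   torch.Size([])

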
def get_length_grouped_indices(lengths, batch_size, mega_batch_mult=None, generator=None):
    """
    Return a list of indices so that each slice of `batch_size` consecutive indices correspond to elements of similar
    lengths. To do this, the indices are:

    - randomly permuted
    - grouped in mega-batches of size `mega_batch_mult * batch_size`
    - sorted by length in each mega-batch

    The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of
    maximum length placed first, so that an OOM happens sooner rather than later.
    """
    # Default for mega_batch_mult: 50 or the number to get 4 megabatches, whichever is smaller.
    if mega_batch_mult is None:
        mega_batch_mult = min(len(lengths) // (batch_size * 4), 50)
        # Just in case, for tiny datasets
        if mega_batch_mult == 0:
            mega_batch_mult = 1

    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    indices = torch.randperm(len(lengths), generator=generator)
    megabatch_size = mega_batch_mult * batch_size
    megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
    megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]

    # The rest is to get the biggest batch first.
    # Since each megabatch is sorted by descending length, the longest element is the first
    megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches]
    max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item()
    # Switch to put the longest element in first position
    megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][0], megabatches[0][0]

    return [i for megabatch in megabatches for i in megabatch]
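

# Illustrative sketch (not part of the original module): with a fixed generator the
# permutation is reproducible, and indices within each mega-batch come out sorted by
# descending length, so every slice of `batch_size` holds items of similar length.
#
#   >>> lengths = [5, 1, 9, 2, 8, 3, 7, 4]
#   >>> g = torch.Generator()
#   >>> _ = g.manual_seed(0)
#   >>> idx = get_length_grouped_indices(lengths, batch_size=2, mega_batch_mult=4, generator=g)
#   >>> sorted(idx) == list(range(len(lengths)))   # the result is a permutation of all indices
#   True

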
class LengthGroupedSampler(Sampler):
    r"""
    Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
    keeping a bit of randomness.
    """

    def __init__(
        self,
        batch_size: int,
        dataset: Optional[Dataset] = None,
        lengths: Optional[List[int]] = None,
        model_input_name: Optional[str] = None,
        generator=None,
    ):
        if dataset is None and lengths is None:
            raise ValueError("One of dataset and lengths must be provided.")

        self.batch_size = batch_size
        if lengths is None:
            model_input_name = model_input_name if model_input_name is not None else "input_ids"
            if (
                not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding))
                or model_input_name not in dataset[0]
            ):
                raise ValueError(
                    "Can only automatically infer lengths for datasets whose items are dictionaries with an "
                    f"'{model_input_name}' key."
                )
            lengths = [len(feature[model_input_name]) for feature in dataset]
        elif isinstance(lengths, torch.Tensor):
            logger.info(
                "If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to List[int]..."
            )
            lengths = lengths.tolist()

        self.lengths = lengths
        self.generator = generator

    def __len__(self):
        return len(self.lengths)

    def __iter__(self):
        indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator)
        return iter(indices)


class DistributedLengthGroupedSampler(DistributedSampler):
    r"""
    Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same
    length while keeping a bit of randomness.
    """

    # Copied and adapted from PyTorch DistributedSampler.
    def __init__(
        self,
        batch_size: int,
        dataset: Optional[Dataset] = None,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        seed: int = 0,
        drop_last: bool = False,
        lengths: Optional[List[int]] = None,
        model_input_name: Optional[str] = None,
    ):
        if dataset is None and lengths is None:
            raise ValueError("One of dataset and lengths must be provided.")
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()

        self.batch_size = batch_size
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last

        if lengths is None:
            model_input_name = model_input_name if model_input_name is not None else "input_ids"
            if (
                not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding))
                or model_input_name not in dataset[0]
            ):
                raise ValueError(
                    "Can only automatically infer lengths for datasets whose items are dictionaries with an "
                    f"'{model_input_name}' key."
                )
            lengths = [len(feature[model_input_name]) for feature in dataset]
        elif isinstance(lengths, torch.Tensor):
            logger.info(
                "If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to"
                " List[int]..."
            )
            lengths = lengths.tolist()

        self.lengths = lengths

        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        if self.drop_last and len(self.lengths) % self.num_replicas != 0:
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas)
        else:
            self.num_samples = math.ceil(len(self.lengths) / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas
        self.seed = seed

    def __iter__(self) -> Iterator:
        # Deterministically shuffle based on epoch and seed
        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)
        indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g)

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            indices += indices[: (self.total_size - len(indices))]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)


class ShardSampler(Sampler):
    """
    Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch
    size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into
    `[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1.

    The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1.
    """

    def __init__(
        self,
        dataset: Dataset,
        batch_size: int = 1,
        drop_last: bool = False,
        num_processes: int = 1,
        process_index: int = 0,
    ):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.num_processes = num_processes
        self.process_index = process_index

        self.total_batch_size = total_batch_size = batch_size * num_processes

        num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size)
        self.total_num_samples = num_batches * total_batch_size

    def __iter__(self):
        indices = list(range(len(self.dataset)))

        # Add extra samples to make it evenly divisible. While loop is there in the edge case we have a tiny dataset
        # and it needs to be done several times.
        while len(indices) < self.total_num_samples:
            indices += indices[: (self.total_num_samples - len(indices))]

        result = []
        for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size):
            result += indices[batch_start : batch_start + self.batch_size]

        return iter(result)

    def __len__(self):
        # Each shard only sees a fraction of total_num_samples.
        return self.total_num_samples // self.num_processes
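

# Illustrative sketch (not part of the original module): reproducing the sharding pattern
# described in the class docstring with 16 samples, batch size 4 and 2 processes.
#
#   >>> data = list(range(16))
#   >>> shard0 = ShardSampler(data, batch_size=4, num_processes=2, process_index=0)
#   >>> shard1 = ShardSampler(data, batch_size=4, num_processes=2, process_index=1)
#   >>> list(shard0)
#   [0, 1, 2, 3, 8, 9, 10, 11]
#   >>> list(shard1)
#   [4, 5, 6, 7, 12, 13, 14, 15]

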
class IterableDatasetShard(IterableDataset):
    """
    Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
    always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x
    num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the
    first batch that would be too small or loop with indices from the beginning.

    On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` with a batch size of
    2:

    - the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]`
    - the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]`

    <Tip warning={true}>

    If your IterableDataset implements some randomization that needs to be applied the same way on all processes
    (for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to
    generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this
    object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the
    iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with
    this.

    </Tip>

    Args:
        dataset (`torch.utils.data.IterableDataset`):
            The iterable dataset to split in several shards.
        batch_size (`int`, *optional*, defaults to 1):
            The size of the batches per shard.
        drop_last (`bool`, *optional*, defaults to `False`):
            Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
            beginning.
        num_processes (`int`, *optional*, defaults to 1):
            The number of processes running concurrently.
        process_index (`int`, *optional*, defaults to 0):
            The index of the current process.
        seed (`int`, *optional*, defaults to 0):
            A random seed that will be used for the random number generation in
            [`~trainer_pt_utils.IterableDatasetShard.set_epoch`].
    """

    def __init__(
        self,
        dataset: IterableDataset,
        batch_size: int = 1,
        drop_last: bool = False,
        num_processes: int = 1,
        process_index: int = 0,
        seed: int = 0,
    ):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.num_processes = num_processes
        self.process_index = process_index
        self.seed = seed
        self.epoch = 0
        self.num_examples = 0

    def set_epoch(self, epoch):
        self.epoch = epoch
        if hasattr(self.dataset, "set_epoch"):
            self.dataset.set_epoch(epoch)

    def __iter__(self):
        self.num_examples = 0
        if (
            not hasattr(self.dataset, "set_epoch")
            and hasattr(self.dataset, "generator")
            and isinstance(self.dataset.generator, torch.Generator)
        ):
            self.dataset.generator.manual_seed(self.seed + self.epoch)
        real_batch_size = self.batch_size * self.num_processes
        process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size)

        first_batch = None
        current_batch = []
        for element in self.dataset:
            self.num_examples += 1
            current_batch.append(element)
            # Wait to have a full batch before yielding elements.
            if len(current_batch) == real_batch_size:
                for i in process_slice:
                    yield current_batch[i]
                if first_batch is None:
                    first_batch = current_batch.copy()
                current_batch = []

        # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
        if not self.drop_last and len(current_batch) > 0:
            if first_batch is None:
                first_batch = current_batch.copy()
            while len(current_batch) < real_batch_size:
                current_batch += first_batch
            for i in process_slice:
                yield current_batch[i]

    def __len__(self):
        # Will raise an error if the underlying dataset is not sized.
        if self.drop_last:
            return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
        else:
            return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
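

# Illustrative sketch (not part of the original module): sharding a small iterable dataset
# across two processes, matching the example in the class docstring. `_RangeIterable` is a
# hypothetical toy dataset used only for this illustration.
#
#   >>> class _RangeIterable(IterableDataset):
#   ...     def __iter__(self):
#   ...         return iter(range(12))
#   >>> shard0 = IterableDatasetShard(_RangeIterable(), batch_size=2, num_processes=2, process_index=0)
#   >>> shard1 = IterableDatasetShard(_RangeIterable(), batch_size=2, num_processes=2, process_index=1)
#   >>> list(shard0), list(shard1)
#   ([0, 1, 4, 5, 8, 9], [2, 3, 6, 7, 10, 11])

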
# In order to keep `trainer.py` compact and easy to understand, place any secondary PT Trainer
# helper methods here


def _get_learning_rate(self):
    if self.is_deepspeed_enabled:
        # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
        # not run for the first few dozen steps while loss scale is too large, and thus during
        # that time `get_last_lr` will fail if called during that warm up stage, so work around it:
        try:
            last_lr = self.lr_scheduler.get_last_lr()[0]
        except AssertionError as e:
            if "need to call step" in str(e):
                logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
                last_lr = 0
            else:
                raise
    else:
        if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            last_lr = self.optimizer.param_groups[0]["lr"]
        else:
            last_lr = self.lr_scheduler.get_last_lr()[0]
        if torch.is_tensor(last_lr):
            last_lr = last_lr.item()
    return last_lr


def _secs2timedelta(secs):
    """
    convert seconds to hh:mm:ss.msec, msecs rounded to 2 decimals
    """
    msec = int(abs(secs - int(secs)) * 100)
    return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"
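

# Illustrative sketch (not part of the original module): the fractional part is kept as
# two digits appended to the formatted timedelta.
#
#   >>> _secs2timedelta(3661.5)
#   '1:01:01.50'
#   >>> _secs2timedelta(0.125)
#   '0:00:00.12'

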
def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]:
    """
    Reformat Trainer metrics values to a human-readable format.

    Args:
        metrics (`Dict[str, float]`):
            The metrics returned from train/evaluate/predict

    Returns:
        metrics (`Dict[str, float]`): The reformatted metrics
    """
    metrics_copy = metrics.copy()
    for k, v in metrics_copy.items():
        if "_mem_" in k:
            metrics_copy[k] = f"{ v >> 20 }MB"
        elif "_runtime" in k:
            metrics_copy[k] = _secs2timedelta(v)
        elif k == "total_flos":
            metrics_copy[k] = f"{ int(v) >> 30 }GF"
        elif isinstance(metrics_copy[k], float):
            metrics_copy[k] = round(v, 4)

    return metrics_copy


def log_metrics(self, split, metrics):
    """
    Log metrics in a specially formatted way.

    Under distributed environment this is done only for a process with rank 0.

    Args:
        split (`str`):
            Mode/split name: one of `train`, `eval`, `test`
        metrics (`Dict[str, float]`):
            The metrics returned from train/evaluate/predict

    Notes on memory reports:

    In order to get memory usage report you need to install `psutil`. You can do that with `pip install psutil`.

    Now when this method is run, you will see a report that will include:

    ```
    init_mem_cpu_alloc_delta = 1301MB
    init_mem_cpu_peaked_delta = 154MB
    init_mem_gpu_alloc_delta = 230MB
    init_mem_gpu_peaked_delta = 0MB
    train_mem_cpu_alloc_delta = 1345MB
    train_mem_cpu_peaked_delta = 0MB
    train_mem_gpu_alloc_delta = 693MB
    train_mem_gpu_peaked_delta = 7MB
    ```

    **Understanding the reports:**

    - the first segment, e.g., `train__`, tells you which stage the metrics are for. Reports starting with `init_`
      will be added to the first stage that gets run. So if only evaluation is run, the memory usage for the
      `__init__` will be reported along with the `eval_` metrics.
    - the third segment is either `cpu` or `gpu` and tells you whether it's the general RAM or the gpu0 memory
      metric.
    - `*_alloc_delta` - is the difference in the used/allocated memory counter between the end and the start of the
      stage - it can be negative if a function released more memory than it allocated.
    - `*_peaked_delta` - is any extra memory that was consumed and then freed - relative to the current allocated
      memory counter - it is never negative. When you look at the metrics of any stage you add up `alloc_delta` +
      `peaked_delta` and you know how much memory was needed to complete that stage.

    The reporting happens only for process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the
    main process does the bulk of work, but it could be not quite so if model parallel is used and then other GPUs may
    use a different amount of gpu memory. This is also not the same under DataParallel where gpu0 may require much more
    memory than the rest since it stores the gradient and optimizer states for all participating GPUS. Perhaps in the
    future these reports will evolve to measure those too.

    The CPU RAM metric measures RSS (Resident Set Size), which includes both the memory which is unique to the process
    and the memory shared with other processes. It is important to note that it does not include swapped out memory, so
    the reports could be imprecise.

    The CPU peak memory is measured using a sampling thread. Due to python's GIL it may miss some of the peak memory if
    that thread didn't get a chance to run when the highest memory was used. Therefore this report can be less than
    reality. Using `tracemalloc` would have reported the exact peak memory, but it doesn't report memory allocations
    outside of python. So if some C++ CUDA extension allocated its own memory it won't be reported. And therefore it
    was dropped in favor of the memory sampling approach, which reads the current process memory usage.

    The GPU allocated and peak memory reporting is done with `torch.cuda.memory_allocated()` and
    `torch.cuda.max_memory_allocated()`. This metric reports only "deltas" for pytorch-specific allocations, as
    `torch.cuda` memory management system doesn't track any memory allocated outside of pytorch. For example, the very
    first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.

    Note that this tracker doesn't account for memory allocations outside of [`Trainer`]'s `__init__`, `train`,
    `evaluate` and `predict` calls.

    Because `evaluation` calls may happen during `train`, we can't handle nested invocations because
    `torch.cuda.max_memory_allocated` is a single counter, so if it gets reset by a nested eval call, `train`'s tracker
    will report incorrect info. If this [pytorch issue](https://github.com/pytorch/pytorch/issues/16266) gets resolved
    it will be possible to change this class to be re-entrant. Until then we will only track the outer level of
    `train`, `evaluate` and `predict` methods. Which means that if `eval` is called during `train`, it's the latter
    that will account for its memory usage and that of the former.

    This also means that if any other tool that is used along the [`Trainer`] calls
    `torch.cuda.reset_peak_memory_stats`, the gpu peak memory stats could be invalid. And the [`Trainer`] will disrupt
    the normal behavior of any such tools that rely on calling `torch.cuda.reset_peak_memory_stats` themselves.

    For best performance you may want to consider turning the memory profiling off for production runs.
    """
    if not self.is_world_process_zero():
        return

    print(f"***** {split} metrics *****")
    metrics_formatted = self.metrics_format(metrics)
    k_width = max(len(str(x)) for x in metrics_formatted.keys())
    v_width = max(len(str(x)) for x in metrics_formatted.values())
    for key in sorted(metrics_formatted.keys()):
        print(f"  {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}")


def save_metrics(self, split, metrics, combined=True):
    """
    Save metrics into a json file for that split, e.g. `train_results.json`.

    Under distributed environment this is done only for a process with rank 0.

    Args:
        split (`str`):
            Mode/split name: one of `train`, `eval`, `test`, `all`
        metrics (`Dict[str, float]`):
            The metrics returned from train/evaluate/predict
        combined (`bool`, *optional*, defaults to `True`):
            Creates combined metrics by updating `all_results.json` with metrics of this call

    To understand the metrics please read the docstring of [`~Trainer.log_metrics`]. The only difference is that raw
    unformatted numbers are saved in the current method.
    """
    if not self.is_world_process_zero():
        return

    path = os.path.join(self.args.output_dir, f"{split}_results.json")
    with open(path, "w") as f:
        json.dump(metrics, f, indent=4, sort_keys=True)

    if combined:
        path = os.path.join(self.args.output_dir, "all_results.json")
        if os.path.exists(path):
            with open(path, "r") as f:
                all_metrics = json.load(f)
        else:
            all_metrics = {}

        all_metrics.update(metrics)
        with open(path, "w") as f:
            json.dump(all_metrics, f, indent=4, sort_keys=True)


def save_state(self):
    """
    Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model.

    Under distributed environment this is done only for a process with rank 0.
    """
    if not self.is_world_process_zero():
        return

    path = os.path.join(self.args.output_dir, "trainer_state.json")
    self.state.save_to_json(path)


def get_model_param_count(model, trainable_only=False):
    """
    Calculate model's total param count. If trainable_only is True then count only those requiring grads.
    """
    if is_deepspeed_zero3_enabled():

        def numel(p):
            return p.ds_numel if hasattr(p, "ds_numel") else p.numel()

    else:

        def numel(p):
            return p.numel()

    return sum(numel(p) for p in model.parameters() if not trainable_only or p.requires_grad)


def get_parameter_names(model, forbidden_layer_types):
    """
    Returns the names of the model parameters that are not inside a forbidden layer.
    """
    result = []
    for name, child in model.named_children():
        result += [
            f"{name}.{n}"
            for n in get_parameter_names(child, forbidden_layer_types)
            if not isinstance(child, tuple(forbidden_layer_types))
        ]
    # Add model specific parameters (defined with nn.Parameter) since they are not in any child.
    result += list(model._parameters.keys())
    return result
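

# Illustrative sketch (not part of the original module): excluding LayerNorm parameters,
# as is typically done when building a weight-decay parameter group (bias parameters are
# usually filtered out by name in a separate step).
#
#   >>> model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
#   >>> decay_parameters = get_parameter_names(model, [nn.LayerNorm])
#   >>> sorted(decay_parameters)
#   ['0.bias', '0.weight']

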
def get_module_class_from_name(module, name):
    """
    Gets a class from a module by its name.

    Args:
        module (`torch.nn.Module`): The module to get the class from.
        name (`str`): The name of the class.
    """
    modules_children = list(module.children())
    if module.__class__.__name__ == name:
        return module.__class__
    elif len(modules_children) == 0:
        return
    else:
        for child_module in modules_children:
            module_class = get_module_class_from_name(child_module, name)
            if module_class is not None:
                return module_class
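

# Illustrative sketch (not part of the original module): looking up a submodule class by
# name, e.g. when resolving a layer class given only its string name.
#
#   >>> model = nn.Sequential(nn.Linear(2, 2), nn.ReLU())
#   >>> get_module_class_from_name(model, "ReLU") is nn.ReLU
#   True
#   >>> get_module_class_from_name(model, "DoesNotExist") is None
#   True

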
  946. def remove_dummy_checkpoint(is_main_process, output_dir, filenames):
  947. if is_main_process:
  948. for filename in filenames:
  949. file = os.path.join(output_dir, filename)
  950. if os.path.isfile(file):
  951. os.remove(file)
  952. if is_sagemaker_mp_enabled():
  953. import smdistributed.modelparallel.torch as smp
  954. @smp.step()
  955. def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
  956. outputs = model(**inputs)
  957. loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
  958. loss /= gradient_accumulation_steps
  959. model.backward(loss)
  960. return loss
  961. @smp.step()
  962. def smp_forward_only(model, inputs):
  963. return model(**inputs)
  964. def smp_gather(tensor):
  965. if isinstance(tensor, (list, tuple)):
  966. return type(tensor)(smp_gather(t) for t in tensor)
  967. elif isinstance(tensor, dict):
  968. return type(tensor)({k: smp_gather(v) for k, v in tensor.items()})
  969. elif not isinstance(tensor, torch.Tensor):
  970. raise TypeError(
  971. f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
  972. )
  973. all_tensors = smp.allgather(tensor, smp.CommGroup.DP_GROUP)
  974. all_tensors = [atleast_1d(t) for t in all_tensors]
  975. return torch.cat([t.cpu() for t in all_tensors], dim=0)
  976. def smp_nested_concat(tensor):
  977. if isinstance(tensor, (list, tuple)):
  978. return type(tensor)(smp_nested_concat(t) for t in tensor)
  979. elif isinstance(tensor, dict):
  980. return type(tensor)({k: smp_nested_concat(v) for k, v in tensor.items()})
  981. # It doesn't seem possible to check here if `tensor` is a StepOutput because StepOutput lives in `smp.step`
  982. # which is also the name of the decorator so Python is confused.
  983. return tensor.concat().detach().cpu()
@dataclass
class AcceleratorConfig:
    """
    A subset of arguments relating to the underlying [`accelerate.Accelerator`]
    implementation utilized in the `Trainer` that can be customized.
    Mostly relating to data.

    Parameters:
        split_batches (`bool`, *optional*, defaults to `False`):
            Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If
            `True`, the actual batch size used will be the same on any kind of distributed processes, but it must be a
            round multiple of the `num_processes` you are using. If `False`, the actual batch size used will be the one
            set in your script multiplied by the number of processes.
        dispatch_batches (`bool`, *optional*):
            If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process
            and then the batches are split and broadcast to each process. Will default to `True` for a `DataLoader` whose
            underlying dataset is an `IterableDataset`, `False` otherwise.
        even_batches (`bool`, *optional*, defaults to `True`):
            If set to `True`, in cases where the total batch size across all processes does not exactly divide the
            dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
            all workers.
        use_seedable_sampler (`bool`, *optional*, defaults to `True`):
            Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]).
            Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed
            results may differ, on average the differences are negligible when using multiple different seeds to
            compare. Should also be run with [`~utils.set_seed`] for the best results.
        gradient_accumulation_kwargs (`dict`, *optional*):
            Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`].
            Any of the following (optional) keys are acceptable:
              num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if
                the latter is set to 1, otherwise an exception will be raised.
              adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`].
                The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`.
              sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch.
                The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`.
        non_blocking (`bool`, *optional*, defaults to `False`):
            Whether to use non-blocking CUDA calls to help minimize synchronization during
            distributed training with prepared `DataLoader` inputs being moved to device.
            Best if used with `pin_memory=True` in the `TrainingArguments`.
        use_configured_state (`bool`, *optional*, defaults to `False`):
            Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined
            before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState`
            must be initialized. May lead to issues using sweeps or hyperparameter tuning.
    """
    # Data related arguments
    split_batches: bool = field(
        default=False,
        metadata={
            "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
            " `True`, the actual batch size used will be the same on any kind of distributed processes, but it must be a"
            " round multiple of the `num_processes` you are using. If `False`, the actual batch size used will be the one set"
            " in your script multiplied by the number of processes."
        },
    )
    dispatch_batches: Optional[bool] = field(
        default=None,
        metadata={
            "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
            " and then the batches are split and broadcast to each process. Will default to `True` for a `DataLoader` whose"
            " underlying dataset is an `IterableDataset`, `False` otherwise."
        },
    )
    even_batches: bool = field(
        default=True,
        metadata={
            "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
            " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
            " all workers."
        },
    )
    use_seedable_sampler: bool = field(
        default=True,
        metadata={
            "help": "Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). "
            "Ensures training results are fully reproducible using a different sampling technique. "
            "While seed-to-seed results may differ, on average the differences are negligible when using "
            "multiple different seeds to compare. Should also be run with [`~utils.set_seed`] for the best results."
        },
    )
    non_blocking: Optional[bool] = field(
        default=False,
        metadata={
            "help": "Whether to use non-blocking CUDA calls to help minimize synchronization during "
            "distributed training with prepared `DataLoader` inputs being moved to device. "
            "Best if used with `pin_memory=True` in the `TrainingArguments`. Requires accelerate "
            "v0.30.0."
        },
    )
    gradient_accumulation_kwargs: Optional[Dict] = field(
        default=None,
        metadata={
            "help": "Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. "
            "Any of the following (optional) keys are acceptable: "
            "  num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if "
            "  the latter is set to 1, otherwise an exception will be raised. "
            "  adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. "
            "  The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. "
            "  sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. "
            "  The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`."
        },
    )
    use_configured_state: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`. "
            "If `True`, an `Accelerator` or `PartialState` must be initialized. May lead to issues using sweeps or hyperparameter tuning."
        },
    )
    @classmethod
    def from_json_file(cls, json_file):
        # Check if the file exists
        open_file = io.open if os.path.exists(json_file) else open
        with open_file(json_file, "r", encoding="utf-8") as f:
            config_dict = json.load(f)
        # Check for unknown keys and load sensible defaults
        extra_keys = sorted(key for key in config_dict.keys() if key not in cls.__dataclass_fields__.keys())
        if len(extra_keys) > 0:
            raise ValueError(
                f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `transformers`"
                " version, or fix (and potentially remove) these keys from your config file."
            )
        return cls(**config_dict)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

    def pop(self, key, default=None):
        return self.__dict__.pop(key, default)
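
# A hedged, illustrative sketch (not part of this module's API) of two typical ways to build an
# `AcceleratorConfig`. Handing it to the trainer through `TrainingArguments(accelerator_config=...)`
# is an assumption about the surrounding API, and "accelerator_config.json" is a placeholder path.
def _accelerator_config_sketch():
    # Build the config directly in Python...
    config = AcceleratorConfig(split_batches=True, non_blocking=True)

    # ...or load it from a JSON file on disk, which raises on unknown keys:
    # config = AcceleratorConfig.from_json_file("accelerator_config.json")

    # The dataclass round-trips through a plain dict when needed.
    return config.to_dict()
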
class LayerWiseDummyOptimizer(torch.optim.Optimizer):
    """
    For layer-wise optimizers such as the GaLore optimizer, the optimization
    step is already performed through post-gradient hooks. The trick is therefore
    to create a dummy optimizer that can take arbitrary args and kwargs and is
    a no-op during training.

    Initial idea from @hiyouga in LLaMA-Factory:
    https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba
    """

    def __init__(self, optimizer_dict=None, *args, **kwargs):
        dummy_tensor = torch.randn(1, 1)
        self.optimizer_dict = optimizer_dict
        super().__init__([dummy_tensor], {"lr": kwargs.get("lr", 1e-03)})

    def zero_grad(self, set_to_none: bool = True) -> None:
        pass

    def step(self, closure=None) -> Optional[float]:
        pass
class LayerWiseDummyScheduler(LRScheduler):
    """
    For layer-wise optimizers such as the GaLore optimizer, the optimization and scheduling steps
    are already performed through post-gradient hooks. The trick is therefore to create a dummy
    scheduler that can take arbitrary args and kwargs and is a no-op during training.
    """

    def __init__(self, *args, **kwargs):
        self.default_lr = kwargs["lr"]
        optimizer = LayerWiseDummyOptimizer(**kwargs)
        last_epoch = -1
        verbose = False
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        # default value
        lrs = [self.default_lr]

        # take each lr from the wrapped per-parameter optimizers if they exist;
        # this assumes the optimizer is a `LayerWiseDummyOptimizer`
        if self.optimizer is not None:
            param_wise_lrs = [
                [group["lr"] for group in optim.param_groups] for optim in self.optimizer.optimizer_dict.values()
            ]
            lrs = list(chain(*param_wise_lrs))

        return lrs

    def _get_closed_form_lr(self):
        return self.base_lrs
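
# A hedged, illustrative sketch (not part of this module's API) of the layer-wise pattern the two
# dummy classes above support: per-parameter optimizers do the real work inside post-accumulate-grad
# hooks (`Tensor.register_post_accumulate_grad_hook`, available in PyTorch >= 2.1), while the dummy
# optimizer/scheduler are what the surrounding training loop calls as no-ops. `model` and the
# learning rate are placeholders, and `torch.optim.AdamW` stands in for a layer-wise optimizer such as GaLore.
def _layer_wise_usage_sketch(model, lr=1e-4):
    optimizer_dict = {p: torch.optim.AdamW([p], lr=lr) for p in model.parameters() if p.requires_grad}

    def optimizer_hook(param):
        # Step and reset the per-parameter optimizer as soon as its gradient has been accumulated.
        optimizer_dict[param].step()
        optimizer_dict[param].zero_grad()

    for param in optimizer_dict:
        param.register_post_accumulate_grad_hook(optimizer_hook)

    # Hand the no-op stand-ins to the training loop so its usual optimizer/scheduler calls stay valid.
    dummy_optimizer = LayerWiseDummyOptimizer(optimizer_dict=optimizer_dict, lr=lr)
    dummy_scheduler = LayerWiseDummyScheduler(optimizer_dict=optimizer_dict, lr=lr)
    return dummy_optimizer, dummy_scheduler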