# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import warnings
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel
from torch.utils.data import Dataset

from .generation.configuration_utils import GenerationConfig
from .integrations.deepspeed import is_deepspeed_zero3_enabled
from .integrations.fsdp import is_fsdp_managed_module
from .trainer import Trainer
from .utils import is_datasets_available, logging
from .utils.deprecation import deprecate_kwarg


if is_datasets_available():
    import datasets

if TYPE_CHECKING:
    from torch.utils.data import IterableDataset

    from .data.data_collator import DataCollator
    from .feature_extraction_utils import FeatureExtractionMixin
    from .image_processing_utils import BaseImageProcessor
    from .modeling_utils import PreTrainedModel
    from .processing_utils import ProcessorMixin
    from .tokenization_utils_base import PreTrainedTokenizerBase
    from .trainer_callback import TrainerCallback
    from .trainer_utils import EvalPrediction, PredictionOutput
    from .training_args import TrainingArguments


logger = logging.get_logger(__name__)

class Seq2SeqTrainer(Trainer):
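    """
    A [`Trainer`] for sequence-to-sequence models. When `args.predict_with_generate` is `True`, evaluation and
    prediction run `model.generate()` instead of a plain forward pass, so `generate`-specific kwargs (e.g.
    `max_new_tokens`, `num_beams`) can be passed through `evaluate()` and `predict()`.
    """
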
    @deprecate_kwarg("tokenizer", new_name="processing_class", version="5.0.0", raise_if_both_names=True)
    def __init__(
        self,
        model: Union["PreTrainedModel", nn.Module] = None,
        args: "TrainingArguments" = None,
        data_collator: Optional["DataCollator"] = None,
        train_dataset: Optional[Union[Dataset, "IterableDataset", "datasets.Dataset"]] = None,
        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
        processing_class: Optional[
            Union["PreTrainedTokenizerBase", "BaseImageProcessor", "FeatureExtractionMixin", "ProcessorMixin"]
        ] = None,
        model_init: Optional[Callable[[], "PreTrainedModel"]] = None,
        compute_metrics: Optional[Callable[["EvalPrediction"], Dict]] = None,
        callbacks: Optional[List["TrainerCallback"]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
    ):
        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            model_init=model_init,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )

        # Override self.model.generation_config if a GenerationConfig is specified in args.
        # Priority: args.generation_config > model.generation_config > default GenerationConfig.
        if self.args.generation_config is not None:
            gen_config = self.load_generation_config(self.args.generation_config)
            self.model.generation_config = gen_config

    @staticmethod
    def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig:
  81. """
  82. Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments.
  83. Args:
  84. gen_config_arg (`str` or [`~generation.GenerationConfig]`):
  85. `Seq2SeqTrainingArguments.generation_config` argument.
  86. Returns:
  87. A `~generation.GenerationConfig`.
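
        Example (a minimal sketch; `"my-model"` stands in for any hypothetical model id or local path that provides
        a `generation_config.json`):

        ```python
        >>> from transformers import GenerationConfig, Seq2SeqTrainer

        >>> # An in-memory config is deep-copied and validated as-is.
        >>> gen_config = Seq2SeqTrainer.load_generation_config(GenerationConfig(max_new_tokens=32))

        >>> # A string is resolved as a file path, a directory path, or a model id / URL.
        >>> gen_config = Seq2SeqTrainer.load_generation_config("my-model")
        ```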
  88. """
        # GenerationConfig provided, nothing to do
        if isinstance(gen_config_arg, GenerationConfig):
            gen_config = deepcopy(gen_config_arg)
        else:
            # str or Path
            pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg
            config_file_name = None

            # Figure out whether it is a path pointing to a file, a path pointing to a directory, or a model id or
            # URL. This step is required in order to determine config_file_name.
            if pretrained_model_name.is_file():
                config_file_name = pretrained_model_name.name
                pretrained_model_name = pretrained_model_name.parent
            # dir path
            elif pretrained_model_name.is_dir():
                pass
            # model id or URL
            else:
                pretrained_model_name = gen_config_arg

            gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name)

        # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
        # an exception if there are warnings at validation time.
        try:
            with warnings.catch_warnings(record=True) as caught_warnings:
                gen_config.validate()
            if len(caught_warnings) > 0:
                raise ValueError(str([w.message for w in caught_warnings]))
        except ValueError as exc:
            raise ValueError(
                "The loaded generation config instance is invalid -- `GenerationConfig.validate()` throws warnings "
                "and/or exceptions. Fix these issues to train your model.\n\nThrown during validation:\n" + str(exc)
            )
        return gen_config

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
  128. """
  129. Run evaluation and returns metrics.
  130. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
  131. (pass it to the init `compute_metrics` argument).
  132. You can also subclass and override this method to inject custom behavior.
  133. Args:
  134. eval_dataset (`Dataset`, *optional*):
  135. Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns
  136. not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
  137. method.
  138. ignore_keys (`List[str]`, *optional*):
  139. A list of keys in the output of your model (if it is a dictionary) that should be ignored when
  140. gathering predictions.
  141. metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
  142. An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
  143. "eval_bleu" if the prefix is `"eval"` (default)
  144. max_length (`int`, *optional*):
  145. The maximum target length to use when predicting with the generate method.
  146. num_beams (`int`, *optional*):
  147. Number of beams for beam search that will be used when predicting with the generate method. 1 means no
  148. beam search.
  149. gen_kwargs:
  150. Additional `generate` specific kwargs.
  151. Returns:
  152. A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
  153. dictionary also contains the epoch number which comes from the training state.
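
        Example (a minimal sketch; assumes `trainer` is a `Seq2SeqTrainer` built with `predict_with_generate=True`
        in its `Seq2SeqTrainingArguments` and an eval dataset with labels):

        ```python
        >>> # `generate`-specific kwargs are forwarded to `model.generate()` during evaluation.
        >>> metrics = trainer.evaluate(max_new_tokens=64, num_beams=4)
        >>> print(metrics["eval_loss"])
        ```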
  154. """
        gen_kwargs = gen_kwargs.copy()

        # Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the
        # training args
        if (
            gen_kwargs.get("max_length") is None
            and gen_kwargs.get("max_new_tokens") is None
            and self.args.generation_max_length is not None
        ):
            gen_kwargs["max_length"] = self.args.generation_max_length
        if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
            gen_kwargs["num_beams"] = self.args.generation_num_beams
        # We don't want to drop samples in general
        self.gather_function = self.accelerator.gather
        self._gen_kwargs = gen_kwargs
        return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)

    def predict(
        self,
        test_dataset: Dataset,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "test",
        **gen_kwargs,
    ) -> "PredictionOutput":
  177. """
  178. Run prediction and returns predictions and potential metrics.
  179. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
  180. will also return metrics, like in `evaluate()`.
  181. Args:
  182. test_dataset (`Dataset`):
  183. Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the
  184. `model.forward()` method are automatically removed. Has to implement the method `__len__`
  185. ignore_keys (`List[str]`, *optional*):
  186. A list of keys in the output of your model (if it is a dictionary) that should be ignored when
  187. gathering predictions.
  188. metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
  189. An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
  190. "eval_bleu" if the prefix is `"eval"` (default)
  191. max_length (`int`, *optional*):
  192. The maximum target length to use when predicting with the generate method.
  193. num_beams (`int`, *optional*):
  194. Number of beams for beam search that will be used when predicting with the generate method. 1 means no
  195. beam search.
  196. gen_kwargs:
  197. Additional `generate` specific kwargs.
  198. <Tip>
  199. If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
  200. padding in a token classification task) the predictions will be padded (on the right) to allow for
  201. concatenation into one array. The padding index is -100.
  202. </Tip>
  203. Returns: *NamedTuple* A namedtuple with the following keys:
  204. - predictions (`np.ndarray`): The predictions on `test_dataset`.
  205. - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
  206. - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
  207. labels).
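
        Example (a minimal sketch; assumes `trainer` was built with `predict_with_generate=True` and a tokenizer as
        `processing_class`, and that `test_dataset` is a hypothetical preprocessed dataset):

        ```python
        >>> import numpy as np

        >>> output = trainer.predict(test_dataset, max_new_tokens=64)
        >>> # Replace any -100 padding before decoding the generated token ids back to text.
        >>> preds = np.where(output.predictions != -100, output.predictions, trainer.processing_class.pad_token_id)
        >>> texts = trainer.processing_class.batch_decode(preds, skip_special_tokens=True)
        ```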
  208. """
        gen_kwargs = gen_kwargs.copy()

        # Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the
        # training args
        if (
            gen_kwargs.get("max_length") is None
            and gen_kwargs.get("max_new_tokens") is None
            and self.args.generation_max_length is not None
        ):
            gen_kwargs["max_length"] = self.args.generation_max_length
        if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
            gen_kwargs["num_beams"] = self.args.generation_num_beams
        self.gather_function = self.accelerator.gather
        self._gen_kwargs = gen_kwargs
        return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
        **gen_kwargs,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
  231. """
  232. Perform an evaluation step on `model` using `inputs`.
  233. Subclass and override to inject custom behavior.
  234. Args:
  235. model (`nn.Module`):
  236. The model to evaluate.
  237. inputs (`Dict[str, Union[torch.Tensor, Any]]`):
  238. The inputs and targets of the model.
  239. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
  240. argument `labels`. Check your model's documentation for all accepted arguments.
  241. prediction_loss_only (`bool`):
  242. Whether or not to return the loss only.
  243. gen_kwargs:
  244. Additional `generate` specific kwargs.
  245. Return:
  246. Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
  247. labels (each being optional).
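
        Example (a minimal sketch of a subclass override; `GreedySeq2SeqTrainer` is hypothetical):

        ```python
        >>> class GreedySeq2SeqTrainer(Seq2SeqTrainer):
        ...     def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None, **gen_kwargs):
        ...         # Force greedy decoding during evaluation, then defer to the parent implementation.
        ...         gen_kwargs["num_beams"] = 1
        ...         return super().prediction_step(
        ...             model, inputs, prediction_loss_only, ignore_keys=ignore_keys, **gen_kwargs
        ...         )
        ```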
  248. """
        if not self.args.predict_with_generate or prediction_loss_only:
            return super().prediction_step(
                model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
            )

        has_labels = "labels" in inputs
        inputs = self._prepare_inputs(inputs)

        # Priority (handled in generate):
        # non-`None` gen_kwargs > model.generation_config > default GenerationConfig()
        if len(gen_kwargs) == 0 and hasattr(self, "_gen_kwargs"):
            gen_kwargs = self._gen_kwargs.copy()
        if "num_beams" in gen_kwargs and gen_kwargs["num_beams"] is None:
            gen_kwargs.pop("num_beams")
        if "max_length" in gen_kwargs and gen_kwargs["max_length"] is None:
            gen_kwargs.pop("max_length")

        default_synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self.model)
        gen_kwargs["synced_gpus"] = gen_kwargs.get("synced_gpus", default_synced_gpus)

        generation_inputs = inputs.copy()
        # If `decoder_input_ids` was created from `labels`, drop it so that the model can generate freely
        # (otherwise, it would continue generating from the padded `decoder_input_ids`)
        if (
            "labels" in generation_inputs
            and "decoder_input_ids" in generation_inputs
            and generation_inputs["labels"].shape == generation_inputs["decoder_input_ids"].shape
        ):
            generation_inputs = {
                k: v for k, v in inputs.items() if k not in ("decoder_input_ids", "decoder_attention_mask")
            }

        summon_full_params_context = (
            FullyShardedDataParallel.summon_full_params(self.model)
            if isinstance(self.model, FullyShardedDataParallel)
            else contextlib.nullcontext()
        )

        with summon_full_params_context:
            generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs)

        # Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop
        # TODO: remove this hack when the legacy code that initializes generation_config from a model config is
        # removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183
        if self.model.generation_config._from_model_config:
            self.model.generation_config._from_model_config = False

        # Retrieves GenerationConfig from model.generation_config
        gen_config = self.model.generation_config
        # In case the batch is shorter than max length, the output should be padded
        if generated_tokens.shape[-1] < gen_config.max_length:
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length)
        elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1:
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1)

        with torch.no_grad():
            if has_labels:
                with self.compute_loss_context_manager():
                    outputs = model(**inputs)
                if self.label_smoother is not None:
                    loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
                else:
                    loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
            else:
                loss = None

        if self.args.prediction_loss_only:
            return loss, None, None

        if has_labels:
            labels = inputs["labels"]
            if labels.shape[-1] < gen_config.max_length:
                labels = self._pad_tensors_to_max_len(labels, gen_config.max_length)
            elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1:
                labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1)
        else:
            labels = None

        return loss, generated_tokens, labels

    def _pad_tensors_to_max_len(self, tensor, max_length):
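        """
        Right-pads `tensor` (of shape `[batch_size, seq_len]`) with the pad token id up to `max_length`, so that
        generated sequences and labels of different lengths can be concatenated across batches.
        """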
        if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
            # If the PAD token is not defined, at least the EOS token has to be defined
            pad_token_id = (
                self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
            )
        else:
            if self.model.config.pad_token_id is not None:
                pad_token_id = self.model.config.pad_token_id
            else:
                raise ValueError("`pad_token_id` must be set in the model configuration in order to pad tensors")

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor