# training_args_seq2seq.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import logging
  15. from dataclasses import dataclass, field
  16. from pathlib import Path
  17. from typing import Optional, Union
  18. from .generation.configuration_utils import GenerationConfig
  19. from .training_args import TrainingArguments
  20. from .utils import add_start_docstrings
  21. logger = logging.getLogger(__name__)
  22. @dataclass
  23. @add_start_docstrings(TrainingArguments.__doc__)
  24. class Seq2SeqTrainingArguments(TrainingArguments):
  25. """
  26. Args:
  27. sortish_sampler (`bool`, *optional*, defaults to `False`):
  28. Whether to use a *sortish sampler* or not. Only possible if the underlying datasets are *Seq2SeqDataset*
  29. for now but will become generally available in the near future.
  30. It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness
  31. for the training set.
  32. predict_with_generate (`bool`, *optional*, defaults to `False`):
  33. Whether to use generate to calculate generative metrics (ROUGE, BLEU).
  34. generation_max_length (`int`, *optional*):
  35. The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the
  36. `max_length` value of the model configuration.
  37. generation_num_beams (`int`, *optional*):
  38. The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the
  39. `num_beams` value of the model configuration.
  40. generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*):
  41. Allows to load a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be either:
  42. - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
  43. huggingface.co.
  44. - a path to a *directory* containing a configuration file saved using the
  45. [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
  46. - a [`~generation.GenerationConfig`] object.
  47. """
  48. sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
  49. predict_with_generate: bool = field(
  50. default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
  51. )
  52. generation_max_length: Optional[int] = field(
  53. default=None,
  54. metadata={
  55. "help": (
  56. "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
  57. "to the `max_length` value of the model configuration."
  58. )
  59. },
  60. )
  61. generation_num_beams: Optional[int] = field(
  62. default=None,
  63. metadata={
  64. "help": (
  65. "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
  66. "to the `num_beams` value of the model configuration."
  67. )
  68. },
  69. )
  70. generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
  71. default=None,
  72. metadata={
  73. "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
  74. },
  75. )
  76. def to_dict(self):
  77. """
  78. Serializes this instance while replace `Enum` by their values and `GenerationConfig` by dictionaries (for JSON
  79. serialization support). It obfuscates the token values by removing their value.
  80. """
  81. # filter out fields that are defined as field(init=False)
  82. d = super().to_dict()
  83. for k, v in d.items():
  84. if isinstance(v, GenerationConfig):
  85. d[k] = v.to_dict()
  86. return d