configuration_plbart.py

# coding=utf-8
# Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PLBART model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)


class PLBartConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate a
    PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the PLBART
    [uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50005):
            Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`PLBartModel`].
        d_model (`int`, *optional*, defaults to 768):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import PLBartConfig, PLBartModel

    >>> # Initializing a PLBART uclanlp/plbart-base style configuration
    >>> configuration = PLBartConfig()

    >>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration
    >>> model = PLBartModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "plbart"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Expose the generic names `num_attention_heads` and `hidden_size` as aliases of the
    # PLBART-specific `encoder_attention_heads` and `d_model` attributes.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50005,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=3072,
        encoder_attention_heads=12,
        decoder_layers=6,
        decoder_ffn_dim=3072,
        decoder_attention_heads=12,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=768,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers  # generic alias mirroring the number of encoder layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class PLBartOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        # When past key/values are enabled, the export also exposes the cached keys
        # as an extra output with a dynamic sequence axis (axis 2).
        if self.use_past:
            return OrderedDict(
                [
                    ("last_hidden_state", {0: "batch", 1: "sequence"}),
                    ("past_keys", {0: "batch", 2: "sequence"}),
                    ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("last_hidden_state", {0: "batch", 1: "sequence"}),
                    ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
                ]
            )
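

# ---------------------------------------------------------------------------
# Hypothetical usage sketch -- not part of the upstream module. It illustrates
# how the two classes above behave: the `attribute_map` aliases on
# `PLBartConfig`, and the ONNX input/output axis mappings exposed by
# `PLBartOnnxConfig` with and without past key/values. It assumes this file
# sits at its usual location inside the `transformers` package so the relative
# imports resolve (e.g. run as
# `python -m transformers.models.plbart.configuration_plbart`).
if __name__ == "__main__":
    config = PLBartConfig(encoder_layers=3, decoder_layers=3, d_model=512)

    # `attribute_map` redirects generic attribute names to PLBART-specific ones.
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == config.encoder_attention_heads == 12

    # Without past key/values (the default), `outputs` has no "past_keys" entry.
    onnx_config = PLBartOnnxConfig(config)
    print(onnx_config.inputs)
    print(onnx_config.outputs)

    # With past key/values enabled (via `OnnxConfigWithPast.with_past`),
    # "past_keys" appears with a dynamic sequence axis at position 2.
    onnx_config_with_past = PLBartOnnxConfig.with_past(config)
    print(onnx_config_with_past.outputs)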