# coding=utf-8
# Copyright 2022, The LongT5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LongT5 model configuration"""

from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)


class LongT5Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
    used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
    [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 32128):
            Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`LongT5Model`].
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
            num_heads`.
        d_ff (`int`, *optional*, defaults to 2048):
            Size of the intermediate feed forward layer in each `LongT5Block`.
        num_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        local_radius (`int`, *optional*, defaults to 127):
            Number of tokens to the left/right of each token to locally self-attend to in the local attention
            mechanism.
        global_block_size (`int`, *optional*, defaults to 16):
            Length of the blocks an input sequence is divided into for a global token representation. Used only for
            `encoder_attention_type = "transient-global"`.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
            `"gated-gelu"` feed forward projection, as does the original LongT5 implementation.
        encoder_attention_type (`string`, *optional*, defaults to `"local"`):
            Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are the
            types supported by the LongT5 implementation.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
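
    Example (a minimal usage sketch; it only assumes the public `LongT5Config` and `LongT5Model` exports of
    `transformers`):

    ```python
    >>> from transformers import LongT5Config, LongT5Model

    >>> # Initializing a LongT5 configuration in the style of google/long-t5-local-base
    >>> configuration = LongT5Config()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = LongT5Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```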
  70. """

    model_type = "longt5"
    keys_to_ignore_at_inference = ["past_key_values"]
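    # Redirect the framework-generic attribute names (e.g. `config.hidden_size`) to the
    # T5-style names actually stored on this config (`config.d_model`, ...).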
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
        "head_dim": "d_kv",
    }

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        local_radius=127,
        global_block_size=16,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        encoder_attention_type="local",
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # default = symmetry
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.local_radius = local_radius
        self.global_block_size = global_block_size
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.encoder_attention_type = encoder_attention_type
        self.use_cache = use_cache
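
        # `feed_forward_proj` is either a bare activation name (e.g. "relu") or a gated variant
        # (e.g. "gated-gelu"): the "gated-" prefix toggles `is_gated_act` and the suffix selects `dense_act_fn`.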
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
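    # Declares the graph inputs for ONNX export and their dynamic axes, including the cached
    # key/value tensors that `OnnxSeq2SeqConfigWithPast` adds when `use_past` is enabled.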
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
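

# For reference, with `use_past=False` the `inputs` property above resolves to:
#     {
#         "input_ids": {0: "batch", 1: "encoder_sequence"},
#         "attention_mask": {0: "batch", 1: "encoder_sequence"},
#         "decoder_input_ids": {0: "batch", 1: "decoder_sequence"},
#         "decoder_attention_mask": {0: "batch", 1: "decoder_sequence"},
#     }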