# coding=utf-8
# Copyright 2023 The Mega Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MEGA configuration"""

from collections import OrderedDict
from typing import Mapping

from ....configuration_utils import PretrainedConfig
from ....onnx import OnnxConfig
from ....utils import logging


logger = logging.get_logger(__name__)

class MegaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Mega
    [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MegaModel`].
        hidden_size (`int`, *optional*, defaults to 128):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Mega encoder.
        intermediate_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden size (self-attention value projection) within the Mega encoder
        ema_projection_size (`int`, *optional*, defaults to 16):
            Dimensionality of the MegaMultiDimensionDampedEma
        bidirectional (`bool`, *optional*, defaults to `True`):
            Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally
            (`True`) or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this
            should be `False` if you intend to use the model as a decoder.
        shared_representation_size (`int`, *optional*, defaults to 64):
            Dimensionality of the linear projection for shared representation of self-attention queries and keys
        use_chunking (`bool`, *optional*, defaults to `False`):
            Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)
        chunk_size (`int`, *optional*, defaults to -1):
            If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If
            chunking is used, input sequences must be padded to a multiple of `chunk_size`
        truncation (`int`, *optional*):
            If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma
        normalize_before_mega (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks
        normalization_type (`str`, *optional*, defaults to `"scalenorm"`):
            Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`,
            `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm)
        norm_affine (`bool`, *optional*, defaults to `True`):
            If `True`, applies a parameterized affine transformation to inputs during normalization
        activation (`str`, *optional*, defaults to `"silu"`):
            Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`,
            `"gelu"`, or `"gelu_accurate"`
        attention_activation (`str`, *optional*, defaults to `"softmax"`):
            Activation function to apply for single-headed self-attention (a la Transformer). Choose one of
            `"softmax"`, `"laplace"`, or `"relu2"`
        dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for EMA self-attention
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        use_feature_dropout (`bool`, *optional*, defaults to `False`):
            Whether to use feature-based (`True`) or standard dropout (`False`)
        use_normalized_ffn (`bool`, *optional*, defaults to `True`):
            Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output
            as-is (`False`)
        nffn_hidden_size (`int`, *optional*, defaults to 256):
            If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this
            is the hidden size of the NFFN
        normalize_before_ffn (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN
        nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the NFFN component.
        max_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length to use for positional representations. For `"simple"` relative positional
            bias, this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to
            longer sequences
        add_token_type_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to account for token types in embeddings. Left as optional to maintain compatibility with the
            original implementation while adding support for token types.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if
            `add_token_type_embeddings = True`
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):
            The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in
            MegaMultiDimensionDampedEma.
        ema_beta_range (`float`, *optional*, defaults to 0.02):
            The standard deviation for initializing the beta parameter (expansion matrix) in
            MegaMultiDimensionDampedEma.
        ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):
            The standard deviation for initializing the gamma (projection matrix) and omega (residual weight)
            parameters in MegaMultiDimensionDampedEma.
        relative_positional_bias (`str`, *optional*, defaults to `"rotary"`):
            Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected,
            `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):
            Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass
            hidden states directly to the LM head (`False`). Remains optional for compatibility with the original
            implementation

    Examples:

    ```python
    >>> from transformers import MegaConfig, MegaModel

    >>> # Initializing a Mega configuration
    >>> configuration = MegaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MegaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
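
    >>> # Illustrative variants (argument values chosen for demonstration only):
    >>> # a decoder-style configuration -- bidirectional EMA is incompatible with
    >>> # causal decoding, so it is disabled when the model acts as a decoder
    >>> decoder_configuration = MegaConfig(bidirectional=False, is_decoder=True, use_cache=True)

    >>> # chunked (linear-complexity) self-attention -- inputs must then be padded
    >>> # to a multiple of `chunk_size`
    >>> chunked_configuration = MegaConfig(use_chunking=True, chunk_size=128)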
    ```"""

    model_type = "mega"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=128,
        num_hidden_layers=4,
        intermediate_size=256,
        ema_projection_size=16,
        bidirectional=True,
        shared_representation_size=64,
        use_chunking=False,
        chunk_size=-1,
        truncation=None,
        normalize_before_mega=True,
        normalization_type="scalenorm",
        norm_affine=True,
        activation="silu",
        attention_activation="softmax",
        dropout_prob=0.1,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        use_feature_dropout=False,
        use_normalized_ffn=True,
        nffn_hidden_size=256,
        normalize_before_ffn=True,
        nffn_activation_dropout_prob=0.1,
        max_positions=2048,
        add_token_type_embeddings=False,
        type_vocab_size=2,
        initializer_range=0.02,
        ema_delta_alpha_range=0.2,
        ema_beta_range=0.02,
        ema_gamma_omega_range=1.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        relative_positional_bias="rotary",
        classifier_dropout=None,
        use_cache=True,
        add_lm_hidden_dense_layer=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.activation = activation
        self.attention_activation = attention_activation
        self.intermediate_size = intermediate_size
        self.ema_projection_size = ema_projection_size
        self.bidirectional = bidirectional
        self.shared_representation_size = shared_representation_size
        self.use_chunking = use_chunking
        self.chunk_size = chunk_size
        self.truncation = truncation
        self.normalize_before_mega = normalize_before_mega
        self.normalization_type = normalization_type
        self.norm_affine = norm_affine
        self.dropout_prob = dropout_prob
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.use_feature_dropout = use_feature_dropout
        self.use_normalized_ffn = use_normalized_ffn
        self.nffn_hidden_size = nffn_hidden_size
        self.normalize_before_ffn = normalize_before_ffn
        self.nffn_activation_dropout_prob = nffn_activation_dropout_prob
        self.max_positions = max_positions
        self.add_token_type_embeddings = add_token_type_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.ema_delta_alpha_range = ema_delta_alpha_range
        self.ema_beta_range = ema_beta_range
        self.ema_gamma_omega_range = ema_gamma_omega_range
        self.relative_positional_bias = relative_positional_bias
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer
        self.num_attention_heads = 1  # not used but required by Hugging Face


class MegaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
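

# Illustrative usage sketch (assumes the default `MegaConfig()` and the default ONNX export
# task): both declared inputs expose dynamic batch and sequence axes.
#
#     onnx_config = MegaOnnxConfig(MegaConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])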