# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ELECTRA model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ElectraConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is
    used to instantiate an ELECTRA model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
    [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
        embedding_size (`int`, *optional*, defaults to 128):
            Dimensionality of the embedding layer. If it differs from `hidden_size`, the embeddings are projected to
            `hidden_size` inside the model.
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        summary_type (`str`, *optional*, defaults to `"first"`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Has to be one of the following options:

            - `"last"`: Take the last token hidden state (like XLNet).
            - `"first"`: Take the first token hidden state (like BERT).
            - `"mean"`: Take the mean of all tokens hidden states.
            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
            - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation.
        summary_last_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            The dropout ratio to be used after the projection and activation.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token in the vocabulary.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
    Examples:

    ```python
    >>> from transformers import ElectraConfig, ElectraModel

    >>> # Initializing an ELECTRA electra-base-uncased style configuration
    >>> configuration = ElectraConfig()

    >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration
    >>> model = ElectraModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "electra"
    def __init__(
        self,
        vocab_size=30522,
        embedding_size=128,
        hidden_size=256,
        num_hidden_layers=12,
        num_attention_heads=4,
        intermediate_size=1024,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        summary_type="first",
        summary_use_proj=True,
        summary_activation="gelu",
        summary_last_dropout=0.1,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
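

# Minimal usage sketch (illustrative only; "./my-electra" is a hypothetical local
# directory, and `save_pretrained` / `from_pretrained` are the serialization
# helpers inherited from `PretrainedConfig`):
#
#     config = ElectraConfig(num_hidden_layers=6)  # override any default above
#     config.save_pretrained("./my-electra")       # writes ./my-electra/config.json
#     reloaded = ElectraConfig.from_pretrained("./my-electra")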


class ElectraOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
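

# Sketch of what `inputs` yields for the default task (illustrative; assumes the
# base `OnnxConfig` constructor accepts `(config, task="default")`), marking the
# batch and sequence dimensions as dynamic axes for ONNX export:
#
#     onnx_config = ElectraOnnxConfig(ElectraConfig())
#     assert onnx_config.inputs == OrderedDict(
#         [
#             ("input_ids", {0: "batch", 1: "sequence"}),
#             ("attention_mask", {0: "batch", 1: "sequence"}),
#             ("token_type_ids", {0: "batch", 1: "sequence"}),
#         ]
#     )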