# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data2VecAudio configuration"""

import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Data2VecAudioConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Data2VecAudioModel`]. It is used to instantiate
    a Data2VecAudio model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Data2VecAudio
    [facebook/data2vec-audio-base-960h](https://huggingface.co/facebook/data2vec-audio-base-960h) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Data2VecAudio model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`Data2VecAudioModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Data2VecAudioForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 5):
            Number of 1D convolutional positional embedding layers.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of each 1D convolutional positional embeddings layer.
        conv_pos_kernel_size (`int`, *optional*, defaults to 19):
            Kernel size of each 1D convolutional positional embeddings layer.
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob * len(time_axis) / mask_time_length` independent masks over the axis.
            If reasoning from the probability of each feature vector to be chosen as the start of the vector span to
            be masked, *mask_time_prob* should be `prob_vector_start * mask_time_length`. Note that overlap may
            decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespectively of `mask_time_prob`. Only relevant if
            `mask_time_prob * len(time_axis) / mask_time_length < mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob * len(feature_axis) / mask_feature_length` independent
            masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of
            the vector span to be masked, *mask_feature_prob* should be `prob_vector_start * mask_feature_length`.
            Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            `mask_feature_prob * len(feature_axis) / mask_feature_length < mask_feature_min_masks`.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`Data2VecAudioForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`Data2VecAudioForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`Data2VecAudioForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
        tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
            A tuple of integers defining the dilation factor of each 1D convolutional layer in the *TDNN* module of
            the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
        xvector_output_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        add_adapter (`bool`, *optional*, defaults to `False`):
            Whether a convolutional network should be stacked on top of the Data2VecAudio encoder. Can be very useful
            for warm-starting Data2VecAudio for SpeechEncoderDecoder models.
        adapter_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        adapter_stride (`int`, *optional*, defaults to 2):
            Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        num_adapter_layers (`int`, *optional*, defaults to 3):
            Number of convolutional layers that should be used in the adapter network. Only relevant if
            `add_adapter is True`.
        output_hidden_size (`int`, *optional*):
            Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant
            if `add_adapter is True`.

    Example:

    ```python
    >>> from transformers import Data2VecAudioConfig, Data2VecAudioModel

    >>> # Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration
    >>> configuration = Data2VecAudioConfig()

    >>> # Initializing a model (with random weights) from the facebook/data2vec-audio-base-960h style configuration
    >>> model = Data2VecAudioModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
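        # Derived value: the three conv_* lists must all have this length (checked
        # below); with the 7-entry defaults, the feature encoder has 7 conv layers.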
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
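        # For example, a hypothetical conv_dim=(512, 512, 512) combined with
        # conv_stride=(5, 2) would raise here, since the tuples describe different
        # numbers of feature-encoder layers.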
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
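        # Worked example of the mask-count formula from the docstring: with the
        # defaults mask_time_prob=0.05 and mask_time_length=10, a hypothetical
        # 1000-frame input draws about 0.05 * 1000 / 10 = 5 independent time masks
        # of 10 frames each; overlap between spans can push the actually masked
        # fraction below 5%, and mask_time_min_masks=2 sets the floor.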
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
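        # Note (a sketch of the intent; the exact layer definitions live in the
        # modeling file): with the defaults adapter_stride=2 and num_adapter_layers=3,
        # the optional adapter downsamples the time axis by a further 2**3 = 8.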

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
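
# A minimal usage sketch (assumes an installed `transformers` package; because of the
# relative imports above, this module is imported from the package rather than run
# directly):
#
#     from transformers import Data2VecAudioConfig
#
#     config = Data2VecAudioConfig()
#     # Each encoder frame covers math.prod(config.conv_stride) = 5 * 2**6 = 320
#     # input samples, so 16 kHz audio yields roughly 50 encoder frames per second.
#     assert config.inputs_to_logits_ratio == 320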