# configuration_perceiver.py
# coding=utf-8
# Copyright Deepmind and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class PerceiverConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
    Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Perceiver
    [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_latents (`int`, *optional*, defaults to 256):
            The number of latents.
        d_latents (`int`, *optional*, defaults to 1280):
            Dimension of the latent embeddings.
        d_model (`int`, *optional*, defaults to 768):
            Dimension of the inputs. Should only be provided in case [`PerceiverTextPreprocessor`] is used or no
            preprocessor is provided.
        num_blocks (`int`, *optional*, defaults to 1):
            Number of blocks in the Transformer encoder.
        num_self_attends_per_block (`int`, *optional*, defaults to 26):
            The number of self-attention layers per block.
        num_self_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each self-attention layer in the Transformer encoder.
        num_cross_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each cross-attention layer in the Transformer encoder.
        qk_channels (`int`, *optional*):
            Dimension to project the queries + keys before applying attention in the cross-attention and
            self-attention layers of the encoder. Will default to preserving the dimension of the queries if not
            specified.
        v_channels (`int`, *optional*):
            Dimension to project the values before applying attention in the cross-attention and self-attention layers
            of the encoder. Will default to preserving the dimension of the queries if not specified.
        cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`):
            Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
        self_attention_widening_factor (`int`, *optional*, defaults to 1):
            Widening factor of the feed-forward layer in the self-attention layers of the Transformer encoder.
        cross_attention_widening_factor (`int`, *optional*, defaults to 1):
            Widening factor of the feed-forward layer in the cross-attention layer of the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        use_query_residual (`bool`, *optional*, defaults to `True`):
            Whether to add a query residual in the cross-attention layer of the encoder.
        vocab_size (`int`, *optional*, defaults to 262):
            Vocabulary size for the masked language modeling model.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that the masked language modeling model might ever be used with. Typically set
            this to something large just in case (e.g., 512 or 1024 or 2048).
        image_size (`int`, *optional*, defaults to 56):
            Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
        train_size (`List[int]`, *optional*, defaults to `[368, 496]`):
            Training size of the images for the optical flow model.
        num_frames (`int`, *optional*, defaults to 16):
            Number of video frames used for the multimodal autoencoding model.
        audio_samples_per_frame (`int`, *optional*, defaults to 1920):
            Number of audio samples per frame for the multimodal autoencoding model.
        samples_per_patch (`int`, *optional*, defaults to 16):
            Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
        output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
            Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the
            multimodal autoencoding model. This excludes the channel dimension.
        output_num_channels (`int`, *optional*, defaults to 512):
            Number of output channels for each modality decoder.

    Example:

    ```python
    >>> from transformers import PerceiverModel, PerceiverConfig

    >>> # Initializing a Perceiver deepmind/language-perceiver style configuration
    >>> configuration = PerceiverConfig()

    >>> # Initializing a model from the deepmind/language-perceiver style configuration
    >>> model = PerceiverModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        output_num_channels=512,
        _label_trainable_num_channels=1024,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
        self.output_num_channels = output_num_channels
        self._label_trainable_num_channels = _label_trainable_num_channels
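

# Usage sketch (illustrative, not part of the library code): the defaults above reproduce the
# deepmind/language-perceiver configuration, and any attribute can be overridden at construction
# time. The specific values below are arbitrary examples.
#
#     from transformers import PerceiverConfig
#
#     small_config = PerceiverConfig(num_latents=64, d_latents=512, num_self_attends_per_block=8)
#     assert small_config.num_latents == 64
#     assert small_config.vocab_size == 262  # unspecified attributes keep their defaults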


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
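
    # For the default (non multiple-choice) task, the property above resolves to
    #     {"inputs": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}}
    # i.e. both the batch and sequence dimensions are exported as dynamic ONNX axes.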

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to the computed batch and sequence lengths
            dummy_input = [" ".join(["a"] * seq_length)] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a feature extractor."
            )
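

# Usage sketch (illustrative, not part of the library code): generating dummy inputs for a text
# Perceiver prior to an ONNX export. Using `PerceiverTokenizer` and the "default" task here is an
# assumption; the actual export is typically driven by the `transformers.onnx` tooling.
#
#     from transformers import PerceiverConfig, PerceiverTokenizer
#
#     config = PerceiverConfig()
#     onnx_config = PerceiverOnnxConfig(config, task="default")
#     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=-1, seq_length=-1)
#     # With dynamic (-1) axes, the fixed fallback dimensions are used, so `dummy` contains
#     # "inputs" and "attention_mask" for a small batch of short sequences.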