configuration_levit.py

# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LeViT model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)
class LevitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the LeViT
    [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size of the input image.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        kernel_size (`int`, *optional*, defaults to 3):
            The kernel size for the initial convolution layers of patch embedding.
        stride (`int`, *optional*, defaults to 2):
            The stride size for the initial convolution layers of patch embedding.
        padding (`int`, *optional*, defaults to 1):
            The padding size for the initial convolution layers of patch embedding.
        patch_size (`int`, *optional*, defaults to 16):
            The patch size for embeddings.
        hidden_sizes (`List[int]`, *optional*, defaults to `[128, 256, 384]`):
            Dimension of each of the encoder blocks.
        num_attention_heads (`List[int]`, *optional*, defaults to `[4, 8, 12]`):
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        depths (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
            The number of layers in each encoder block.
        key_dim (`List[int]`, *optional*, defaults to `[16, 16, 16]`):
            The size of the key in each of the encoder blocks.
        drop_path_rate (`int`, *optional*, defaults to 0):
            The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
        mlp_ratio (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        attention_ratio (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the output dimension compared to the input dimension of the attention layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import LevitConfig, LevitModel

    >>> # Initializing a LeViT levit-128S style configuration
    >>> configuration = LevitConfig()

    >>> # Initializing a model (with random weights) from the levit-128S style configuration
    >>> model = LevitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") attention blocks between the three stages; each entry is
        # [op_name, key_dim, num_attention_heads, attention_ratio, mlp_ratio, stride].
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
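
# A minimal sketch of how `down_ops` tracks the arguments above, assuming the
# released `transformers` package is installed (a top-level import is used here,
# since this module itself relies on relative imports and cannot run standalone):
#
#   >>> from transformers import LevitConfig
#   >>> config = LevitConfig(hidden_sizes=[192, 288, 384], key_dim=[32, 32, 32])
#   >>> config.down_ops
#   [['Subsample', 32, 6, 4, 2, 2], ['Subsample', 32, 9, 4, 2, 2]]
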

# Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
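
# A short usage sketch, assuming the `OnnxConfig` base-class constructor (which
# accepts a model config and an optional task). `LevitOnnxConfig` only declares
# the dynamic axes of `pixel_values` plus a validation tolerance:
#
#   >>> from transformers import LevitConfig
#   >>> onnx_config = LevitOnnxConfig(LevitConfig())
#   >>> dict(onnx_config.inputs)
#   {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
#   >>> onnx_config.atol_for_validation
#   0.0001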