# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Idefics model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger following the transformers convention.
logger = logging.get_logger(__name__)
  24. class IdeficsVisionConfig(PretrainedConfig):
  25. r"""
  26. This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
  27. Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
  28. with the defaults will yield a similar configuration to that of the Idefics-9B.
  29. e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
  30. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
  31. documentation from [`PretrainedConfig`] for more information.
  32. Args:
  33. hidden_size (`int`, *optional*, defaults to 768):
  34. Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
  35. image_size (`int`, *optional*, defaults to 224):
  36. The size (resolution) of each image.
  37. intermediate_size (`int`, *optional*, defaults to 5120):
  38. Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
  39. patch_size (`int`, *optional*, defaults to 14):
  40. The size (resolution) of each patch.
  41. num_hidden_layers (`int`, *optional*, defaults to 32):
  42. Number of hidden layers in the Transformer encoder.
  43. num_attention_heads (`int`, *optional*, defaults to 16):
  44. Number of attention heads for each attention layer in the Transformer encoder.
  45. image_num_channels (`int`, *optional*, defaults to `3`):
  46. Number of image channels.
  47. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
  48. The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
  49. `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
  50. layer_norm_eps (`float`, *optional*, defaults to 1e-5):
  51. The epsilon used by the layer normalization layers.
  52. attention_dropout (`float`, *optional*, defaults to 0.0):
  53. The dropout ratio for the attention probabilities.
  54. initializer_range (`float`, *optional*, defaults to 0.02):
  55. The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
  56. initializer_factor (`float`, *optional*, defaults to 1.0):
  57. A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
  58. testing).
  59. initializer_range (`float`, *optional*, defaults to 0.02):
  60. The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
  61. """
  62. model_type = "idefics"
  63. attribute_map = {
  64. "hidden_size": "embed_dim",
  65. }
  66. def __init__(
  67. self,
  68. embed_dim=768,
  69. image_size=224,
  70. intermediate_size=5120,
  71. patch_size=14,
  72. num_hidden_layers=32,
  73. num_attention_heads=16,
  74. num_channels=3,
  75. hidden_act="gelu",
  76. layer_norm_eps=1e-5,
  77. attention_dropout=0.0,
  78. initializer_range=0.02,
  79. initializer_factor=1.0,
  80. **kwargs,
  81. ):
  82. self.embed_dim = embed_dim
  83. self.image_size = image_size
  84. self.intermediate_size = intermediate_size
  85. self.patch_size = patch_size
  86. self.num_hidden_layers = num_hidden_layers
  87. self.num_attention_heads = num_attention_heads
  88. self.num_channels = num_channels
  89. self.layer_norm_eps = layer_norm_eps
  90. self.attention_dropout = attention_dropout
  91. self.initializer_range = initializer_range
  92. self.initializer_factor = initializer_factor
  93. self.hidden_act = hidden_act
  94. super().__init__(**kwargs)
  95. class IdeficsPerceiverConfig(PretrainedConfig):
  96. r"""
  97. This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
  98. Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
  99. with the defaults will yield a similar configuration to that of the Idefics-9B.
  100. e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
  101. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
  102. documentation from [`PretrainedConfig`] for more information.
  103. Args:
  104. use_resampler (`bool`, *optional*, defaults to `False`):
  105. Whether or not to use the resampler
  106. resampler_n_latents (`int`, *optional*, defaults to ):
  107. Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
  108. resampler_depth (`int`, *optional*, defaults to 6):
  109. Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
  110. resampler_n_heads (`int`, *optional*, defaults to 16):
  111. Number of heads in each Transformer block (for multi-headed self-attention).
  112. resampler_head_dim (`int`, *optional*, defaults to 96):
  113. Dimensionality of each head projection in the Transformer block.
  114. qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
  115. Whether or not to use qk layer norms in perceiver
  116. """
  117. model_type = "idefics"
  118. def __init__(
  119. self,
  120. use_resampler=False,
  121. resampler_n_latents=64,
  122. resampler_depth=6,
  123. resampler_n_heads=16,
  124. resampler_head_dim=96,
  125. qk_layer_norms_perceiver=False,
  126. **kwargs,
  127. ):
  128. self.use_resampler = use_resampler
  129. self.resampler_n_latents = resampler_n_latents
  130. self.resampler_depth = resampler_depth
  131. self.resampler_n_heads = resampler_n_heads
  132. self.resampler_head_dim = resampler_head_dim
  133. self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
  134. super().__init__(**kwargs)
  135. class IdeficsConfig(PretrainedConfig):
  136. r"""
  137. This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
  138. Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
  139. with the defaults will yield a similar configuration to that of the Idefics-9B.
  140. e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
  141. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
  142. documentation from [`PretrainedConfig`] for more information.
  143. Args:
  144. additional_vocab_size (`int`, *optional*, defaults to 0):
  145. Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
  146. are always trainable whereas regular vocab tokens can be frozen or not.
  147. vocab_size (`int`, *optional*, defaults to 32000):
  148. Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
  149. `inputs_ids` passed when calling [`~IdeficsModel`]
  150. hidden_size (`int`, *optional*, defaults to 4096):
  151. Dimension of the hidden representations.
  152. intermediate_size (`int`, *optional*, defaults to 11008):
  153. Dimension of the MLP representations.
  154. num_hidden_layers (`int`, *optional*, defaults to 32):
  155. Number of hidden layers in the Transformer encoder.
  156. num_attention_heads (`int`, *optional*, defaults to 32):
  157. Number of attention heads for each attention layer in the Transformer encoder.
  158. dropout (`float`, *optional*, defaults to 0.0):
  159. The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
  160. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
  161. The non-linear activation function (function or string) in the decoder.
  162. initializer_range (`float`, *optional*, defaults to 0.02):
  163. The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
  164. alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
  165. Initialization type for the alphas.
  166. alphas_initializer_range (`float`, *optional*, defaults to 0.0):
  167. The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
  168. Attention.
  169. alpha_type (`str`, *optional*, defaults to `"float"`):
  170. Whether the gating alphas should be vectors or single floats.
  171. rms_norm_eps (`float`, *optional*, defaults to 1e-6):
  172. The epsilon used by the rms normalization layers.
  173. use_cache (`bool`, *optional*, defaults to `True`):
  174. Whether or not the model should return the last key/values attentions (not used by all models). Only
  175. relevant if `config.is_decoder=True`.
  176. pad_token_id (`int`, *optional*, defaults to 0)
  177. Padding token id.
  178. bos_token_id (`int`, *optional*, defaults to 1)
  179. Beginning of stream token id.
  180. eos_token_id (`int`, *optional*, defaults to 2)
  181. End of stream token id.
  182. tie_word_embeddings(`bool`, *optional*, defaults to `False`):
  183. Whether to tie weight embeddings
  184. cross_layer_interval (`int`, *optional*, default to 1)
  185. Interval for cross attention (from text to image) layers.
  186. qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
  187. freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
  188. freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
  189. Exceptions to freezing text layers when `freeze_text_layers` is `True`
  190. freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
  191. freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
  192. freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
  193. Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
  194. use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
  195. vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
  196. perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
  197. Example:
  198. ```python
  199. >>> from transformers import IdeficsModel, IdeficsConfig
  200. >>> # Initializing a Idefics idefics-9b style configuration
  201. >>> configuration = IdeficsConfig()
  202. >>> # Initializing a model from the idefics-9b style configuration
  203. >>> model = IdeficsModel(configuration)
  204. >>> # Accessing the model configuration
  205. >>> configuration = model.config
  206. ```"""
  207. model_type = "idefics"
  208. is_composition = False
  209. def __init__(
  210. self,
  211. vocab_size=32000,
  212. additional_vocab_size=0,
  213. hidden_size=4096,
  214. intermediate_size=11008,
  215. num_hidden_layers=32,
  216. num_attention_heads=32,
  217. dropout=0.0,
  218. hidden_act="silu",
  219. initializer_range=0.02,
  220. alpha_initializer="zeros",
  221. alphas_initializer_range=0.0,
  222. alpha_type="float",
  223. rms_norm_eps=1e-6,
  224. use_cache=True,
  225. pad_token_id=0,
  226. bos_token_id=1,
  227. eos_token_id=2,
  228. tie_word_embeddings=False,
  229. cross_layer_interval=1,
  230. qk_layer_norms=False,
  231. freeze_text_layers=True,
  232. freeze_text_module_exceptions=[],
  233. freeze_lm_head=False,
  234. freeze_vision_layers=True,
  235. freeze_vision_module_exceptions=[],
  236. use_resampler=False,
  237. vision_config=None,
  238. perceiver_config=None,
  239. **kwargs,
  240. ):
  241. self.vocab_size = vocab_size
  242. self.additional_vocab_size = additional_vocab_size
  243. self.hidden_size = hidden_size
  244. self.intermediate_size = intermediate_size
  245. self.num_hidden_layers = num_hidden_layers
  246. self.num_attention_heads = num_attention_heads
  247. self.dropout = dropout
  248. self.hidden_act = hidden_act
  249. self.initializer_range = initializer_range
  250. self.alpha_initializer = alpha_initializer
  251. self.alphas_initializer_range = alphas_initializer_range
  252. self.alpha_type = alpha_type
  253. self.rms_norm_eps = rms_norm_eps
  254. self.use_cache = use_cache
  255. self.cross_layer_interval = cross_layer_interval
  256. self.qk_layer_norms = qk_layer_norms
  257. self.freeze_vision_layers = freeze_vision_layers
  258. self.freeze_text_layers = freeze_text_layers
  259. self.freeze_text_module_exceptions = freeze_text_module_exceptions
  260. self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
  261. self.freeze_lm_head = freeze_lm_head
  262. self.use_resampler = use_resampler
  263. if perceiver_config is None:
  264. self.perceiver_config = IdeficsPerceiverConfig()
  265. elif isinstance(perceiver_config, dict):
  266. self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
  267. elif isinstance(perceiver_config, IdeficsPerceiverConfig):
  268. self.perceiver_config = perceiver_config
  269. if vision_config is None:
  270. self.vision_config = IdeficsVisionConfig()
  271. elif isinstance(vision_config, dict):
  272. self.vision_config = IdeficsVisionConfig(**vision_config)
  273. elif isinstance(vision_config, IdeficsVisionConfig):
  274. self.vision_config = vision_config
  275. super().__init__(
  276. pad_token_id=pad_token_id,
  277. bos_token_id=bos_token_id,
  278. eos_token_id=eos_token_id,
  279. tie_word_embeddings=tie_word_embeddings,
  280. **kwargs,
  281. )
  282. # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
  283. # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
  284. # updates the config object with `kwargs` from from_pretrained, so during the instantiation
  285. # of this object many attributes have default values and haven't yet been overridden.
  286. # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.