# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Idefics model."""

from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import ModelOutput
from ...modeling_utils import PretrainedConfig, PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_idefics import IdeficsConfig
from .perceiver import IdeficsPerceiverResampler
from .vision import IdeficsVisionTransformer


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "IdeficsConfig"

@dataclass
class IdeficsBaseModelOutputWithPast(ModelOutput):
    """
    Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally, if
            `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size,
            num_images, sequence_length, hidden_size)`.

            Image hidden-states of the model produced by the vision encoder, and optionally by the perceiver.
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class IdeficsCausalLMOutputWithPast(ModelOutput):
    """
    Base class for Idefics causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
            Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size,
            num_images, sequence_length, hidden_size)`.

            Image hidden-states of the model produced by the vision encoder, and optionally by the perceiver.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None

def expand_inputs_for_generation(
    input_ids,
    expand_size=1,
    is_encoder_decoder=False,
    attention_mask=None,
    encoder_outputs=None,
    **model_kwargs,
):
    expanded_return_idx = (
        torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
    )
    input_ids = input_ids.index_select(0, expanded_return_idx)
    model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None)
    model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None)
    model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None)
    model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None)

    if "token_type_ids" in model_kwargs:
        token_type_ids = model_kwargs["token_type_ids"]
        model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)

    if attention_mask is not None:
        model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)

    if model_kwargs["image_attention_mask"] is not None:
        model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select(
            0, expanded_return_idx
        )

    if model_kwargs["pixel_values"] is not None:
        model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx)

    elif model_kwargs["image_encoder_embeddings"] is not None:
        model_kwargs["image_encoder_embeddings"] = model_kwargs["image_encoder_embeddings"].index_select(
            0, expanded_return_idx
        )

    elif model_kwargs["perceiver_embeddings"] is not None:
        model_kwargs["perceiver_embeddings"] = model_kwargs["perceiver_embeddings"].index_select(
            0, expanded_return_idx
        )

    return input_ids, model_kwargs
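
# Example (illustrative sketch; the tensors below are made up): each batch row is repeated `expand_size`
# times along dim 0, which is what e.g. `num_return_sequences > 1` or beam search needs.
#
#   input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]])
#   mask = torch.ones_like(input_ids)
#   expanded_ids, kwargs = expand_inputs_for_generation(input_ids, expand_size=2, attention_mask=mask)
#   # expanded_ids -> [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]
#   # kwargs["attention_mask"] is expanded row-wise in the same way; the image-related entries
#   # (pixel_values, image_encoder_embeddings, perceiver_embeddings, image_attention_mask) stay None here.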

def freeze_model(model, module_exceptions=[]):
    mapping = {
        "LayerNorm": nn.LayerNorm,
        "Linear": nn.Linear,
        "Embedding": nn.Embedding,
    }
    module_exceptions_mapped = [mapping[m] for m in module_exceptions]
    for module in model.modules():
        if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped):
            module.requires_grad_(True)  # Explicitly setting it to true to avoid any mistakes
        else:
            module.requires_grad_(False)
    return model
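
# Example (illustrative sketch with a hypothetical module): every parameter is frozen except those owned
# by the excepted module classes, which are explicitly re-enabled.
#
#   net = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
#   freeze_model(net, module_exceptions=["LayerNorm"])
#   # net[0].weight.requires_grad -> False; net[1].weight.requires_grad -> True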

class IdeficsDecoupledEmbedding(nn.Embedding):
    # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding
    """
    Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practice, the
    regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0,
    then it will create `num_additional_embeddings` additional parameters that are always trained. If
    `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
    """

    def __init__(
        self,
        num_embeddings,
        num_additional_embeddings,
        embedding_dim,
        partially_freeze: Optional[bool] = False,
        device=None,
        dtype=None,
        padding_idx=None,
        **kwargs,
    ) -> None:
        """
        Args:
            num_embeddings (`int`):
                Size of the dictionary of embeddings
            num_additional_embeddings (`int`):
                Number of additional embeddings. Only useful when `partially_freeze=True`.
            embedding_dim (`int`):
                The size of each embedding vector
            partially_freeze: (`bool`, *optional*, defaults to `False`):
                If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
            padding_idx (`int`, *optional*):
                The padding index (needs to be less than num_embeddings)

        Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `padding_idx`,
        `max_norm` or `norm_type`. We are not supporting these.
        """
        if padding_idx is not None and padding_idx > num_embeddings:
            raise ValueError(f"padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}")
        super().__init__(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            device=device,
            dtype=dtype,
            padding_idx=padding_idx,
            **kwargs,
        )
        self.num_embeddings = num_embeddings
        self.padding_idx = padding_idx
        self.num_additional_embeddings = num_additional_embeddings
        self.partially_freeze = partially_freeze

        if partially_freeze:
            self.weight.requires_grad_(False)

        if self.num_additional_embeddings > 0:
            self.additional_embedding = nn.Embedding(
                num_embeddings=self.num_additional_embeddings,
                embedding_dim=embedding_dim,
                device=device,
                dtype=dtype,
            )

    def forward(self, input_ids):
        """
        We have 2 embeddings, with different indices - one pretrained self.weight and another
        self.additional_embedding.weight that is being trained.

        In order to make a lookup of the input ids, we:
        1. find out the indices of the entries belonging to the 2nd embedding
        2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd
           embedding starts from 0 and not num_embeddings
        3. perform the 2nd embedding lookup
        4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index
        5. perform the 1st embedding lookup
        6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup

        Note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but
        then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices -
        i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are
        usually relatively short it's probably not faster or if faster not by much - but might be a good idea to
        measure.
        """
        if self.num_additional_embeddings == 0:
            return F.embedding(input_ids, self.weight)

        # Clone so that we don't modify the original input_ids later on
        input_ids = input_ids.clone()
        additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
        input_ids_additional_vocab = input_ids[additional_vocab_indices]
        additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings)

        # for successful lookup replace input_ids with 0, the results of these will be discarded anyway
        input_ids[additional_vocab_indices] = 0
        full_vector = F.embedding(input_ids, self.weight)

        # overwrite the records with high indices
        full_vector[additional_vocab_indices] = additional_embeddings

        return full_vector

    def extra_repr(self) -> str:
        return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format(
            self.num_embeddings,
            self.num_additional_embeddings,
            self.embedding_dim,
            self.partially_freeze,
        )
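
# Example (illustrative sketch; sizes are made up): ids below `num_embeddings` hit the (possibly frozen)
# pretrained table, while ids at or above it hit the always-trainable `additional_embedding`.
#
#   embed = IdeficsDecoupledEmbedding(
#       num_embeddings=100, num_additional_embeddings=4, embedding_dim=16, partially_freeze=True
#   )
#   out = embed(torch.tensor([[5, 99, 100, 103]]))  # last two ids map to additional_embedding rows 0 and 3
#   # out.shape -> (1, 4, 16)
#   # embed.weight.requires_grad -> False; embed.additional_embedding.weight.requires_grad -> True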

class IdeficsDecoupledLinear(nn.Linear):
    # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
    """
    Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
    regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
    then it will create `out_additional_features * in_features` additional parameters that are always trained. If
    `out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        out_additional_features: int = 0,
        bias: bool = True,
        partially_freeze: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        """
        Args:
            out_additional_features (`int`): Number of additional trainable dimensions. Only makes sense when
                `partially_freeze=True`.
            partially_freeze (`bool`): If `True`, the regular `weight` will be frozen and extra parameters (if any)
                will be trainable. If `False`, default to the regular behavior of `nn.Linear`.
        """
        super().__init__(in_features, out_features, bias, device, dtype)
        self.out_additional_features = out_additional_features
        self.partially_freeze = partially_freeze

        self.in_features = in_features
        self.out_features = out_features

        if partially_freeze:
            self.weight.requires_grad_(False)
            if bias:
                self.bias.requires_grad_(False)

        if out_additional_features > 0:
            self.additional_fc = nn.Linear(
                in_features=in_features,
                out_features=out_additional_features,
                bias=bias,
                device=device,
                dtype=dtype,
            )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = F.linear(input, self.weight, self.bias)

        if self.out_additional_features > 0:
            additional_features = self.additional_fc(input)
            output = torch.cat((output, additional_features), -1)

        return output

    def extra_repr(self) -> str:
        """Overwriting `nn.Linear.extra_repr` to include new parameters."""
        return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
            self.in_features,
            self.out_features,
            self.out_additional_features,
            self.bias is not None,
            self.partially_freeze,
        )
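
# Example (illustrative sketch; sizes are made up): the frozen base projection and the trainable extra
# projection are concatenated along the last dimension.
#
#   lin = IdeficsDecoupledLinear(in_features=16, out_features=32, out_additional_features=4, partially_freeze=True)
#   y = lin(torch.randn(2, 16))
#   # y.shape -> (2, 36): 32 frozen output features + 4 always-trainable ones from `additional_fc`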

# this was adapted from LlamaRMSNorm
class IdeficsRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        IdeficsRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm)
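
# Example (illustrative sketch): RMSNorm rescales by the root-mean-square of the features, with no mean
# subtraction and no bias, i.e. y = weight * x / sqrt(mean(x**2) + eps).
#
#   norm = IdeficsRMSNorm(hidden_size=8)
#   x = torch.randn(2, 3, 8)
#   manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + norm.variance_epsilon)
#   # torch.allclose(norm(x), norm.weight * manual) -> True (in float32)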

# this was adapted from LlamaRotaryEmbedding
class IdeficsEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
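
# Example (illustrative sketch; shapes are made up): the cos/sin tables come from `IdeficsEmbedding`,
# and queries/keys are rotated per position along the head dimension.
#
#   rotary = IdeficsEmbedding(dim=64)                # head_dim = 64
#   q = k = torch.randn(1, 8, 10, 64)                # [bsz, num_heads, seq_len, head_dim]
#   cos, sin = rotary(q, seq_len=10)
#   position_ids = torch.arange(10).unsqueeze(0)     # [bsz, seq_len]
#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
#   # q_rot.shape == q.shape; position 0 is unchanged because cos=1 and sin=0 there.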

# this was adapted from LlamaMLP
class IdeficsMLP(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
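
# Example (illustrative sketch; sizes are made up): the gated-MLP form used here is
# down_proj(act(gate_proj(x)) * up_proj(x)), i.e. SwiGLU-style when `hidden_act` is "silu".
#
#   mlp = IdeficsMLP(hidden_size=16, intermediate_size=64, hidden_act="silu")
#   y = mlp(torch.randn(2, 5, 16))
#   # y.shape -> (2, 5, 16)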

# this was adapted from LlamaAttention
class IdeficsAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        dropout: float = 0.0,
        is_cross_attention: bool = False,
        config: PretrainedConfig = None,
        qk_layer_norms: bool = False,
        layer_idx: int = None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.head_dim = hidden_size // num_heads
        self.dropout = dropout
        self.is_causal = True

        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        if (self.head_dim * num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {num_heads})."
            )

        self.is_cross_attention = is_cross_attention

        if not hasattr(nn.functional, "scaled_dot_product_attention"):
            raise ValueError("this model requires pytorch 2.0 or higher")

        if self.is_cross_attention:
            kv_input_dim = (
                self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
            )
            self.q_proj = nn.Linear(
                self.hidden_size,
                num_heads * self.head_dim,
                bias=False,
            )
            self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False)
            self.v_proj = nn.Linear(
                kv_input_dim,
                num_heads * self.head_dim,
                bias=False,
            )
        else:
            self.q_proj = nn.Linear(
                self.hidden_size,
                num_heads * self.head_dim,
                bias=False,
            )
            self.k_proj = nn.Linear(
                self.hidden_size,
                num_heads * self.head_dim,
                bias=False,
            )
            self.v_proj = nn.Linear(
                self.hidden_size,
                num_heads * self.head_dim,
                bias=False,
            )
        self.o_proj = nn.Linear(
            num_heads * self.head_dim,
            hidden_size,
            bias=False,
        )
        self.rotary_emb = IdeficsEmbedding(self.head_dim)

        self.qk_layer_norms = qk_layer_norms
        if self.qk_layer_norms:
            self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
            self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # if key_value_states are provided this layer is used as a cross-attention layer
        is_cross_attention = self.is_cross_attention or key_value_states is not None

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        if not is_cross_attention:
            key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
            value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        else:
            _, kv_len, _ = key_value_states.size()  # Note that, in this case, `kv_len` == `kv_seq_len`
            key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
            value_states = (
                self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
            )

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += cache_position[0]

        if not is_cross_attention:
            cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len))
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        if self.qk_layer_norms:
            query_states = self.q_layer_norm(query_states)
            key_states = self.k_layer_norm(key_states)

        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
        is_causal = True if self.is_causal and causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        attn_weights = None
        if output_attentions:
            logger.warning_once(
                "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
            )

        return attn_output, attn_weights, past_key_value
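
# Example (illustrative sketch; standalone self-attention with made-up sizes): with no attention mask and
# no cache, the layer falls back to SDPA's built-in causal masking.
#
#   attn = IdeficsAttention(hidden_size=64, num_heads=4, layer_idx=0)
#   x = torch.randn(1, 10, 64)
#   position_ids = torch.arange(10).unsqueeze(0)
#   out, weights, _ = attn(hidden_states=x, position_ids=position_ids)
#   # out.shape -> (1, 10, 64); weights is None because SDPA does not expose attention probabilities.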

# this was adapted from LlamaDecoderLayer
class IdeficsDecoderLayer(nn.Module):
    def __init__(self, config: IdeficsConfig, layer_idx: int = None):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = IdeficsAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.dropout,
            config=config,
            layer_idx=layer_idx,
        )
        self.mlp = IdeficsMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.dropout = config.dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

class IdeficsGatedCrossAttentionLayer(nn.Module):
    def __init__(self, config: IdeficsConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.cross_attn = IdeficsAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            is_cross_attention=True,
            dropout=config.dropout,
            config=config,
            qk_layer_norms=config.qk_layer_norms,
        )
        self.mlp = IdeficsMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.dropout = config.dropout

        self.act_cross_attn = nn.Tanh()
        self.act_dense = nn.Tanh()

        if config.alpha_initializer == "zeros":
            if config.alpha_type == "vector":
                self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
                self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(torch.zeros(1))
                self.alpha_dense = nn.Parameter(torch.zeros(1))
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
        elif config.alpha_initializer == "ones":
            if config.alpha_type == "vector":
                self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size))
                self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size))
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(torch.ones(1))
                self.alpha_dense = nn.Parameter(torch.ones(1))
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
        elif config.alpha_initializer in {"normal", "gaussian", "random"}:
            if config.alpha_type == "vector":
                self.alpha_cross_attn = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
                )
                self.alpha_dense = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
                )
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,))
                )
                self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,)))
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
        else:
            raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!")

        if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
            raise ValueError("Alpha parameters not initialized correctly!")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        image_hidden_states: Optional[torch.Tensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        cross_attention_gate: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            cross_attention_gate (`torch.FloatTensor`, *optional*):
                gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if image_hidden_states is None:
            raise ValueError(
                "`image_hidden_states` is required for Idefics cross attention module which are visual features to be"
                " conditioned on."
            )

        if cross_attention_gate is None:
            raise ValueError(
                "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images."
            )

        if past_key_value is not None:
            raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.")

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Cross Attention over the image hidden states
        hidden_states, self_attn_weights, present_key_value = self.cross_attn(
            hidden_states=hidden_states,
            key_value_states=image_hidden_states,
            attention_mask=image_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # Fill in zeros for cross_attention hidden_states of tokens attending to no images
        hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0)
        hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
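
# Note (illustrative): the gating follows the Flamingo-style recipe - the cross-attention and MLP branches
# are scaled by tanh(alpha) before being added back to the residual stream, so with alpha_initializer="zeros"
# the layer starts out as an identity function and the visual pathway is blended in as alpha is learned:
#
#   hidden_states = residual + tanh(alpha_cross_attn) * cross_attn_out
#   hidden_states = residual + tanh(alpha_dense) * mlp_out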

LLAMA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`IdeficsConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class IdeficsPreTrainedModel(PreTrainedModel):
    config_class = IdeficsConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_static_cache = True

    def _init_weights(self, module):
        # important: this ported version of Idefics isn't meant for training from scratch - only
        # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
        # base should be used for training from scratch and it contains the correct code.
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

LLAMA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""

@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class IdeficsModel(IdeficsPreTrainedModel):
    """
    Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is an [`IdeficsDecoderLayer`].

    Args:
        config: IdeficsConfig
    """

    def __init__(self, config: IdeficsConfig):
        super().__init__(config)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = IdeficsDecoupledEmbedding(
            num_embeddings=config.vocab_size,
            num_additional_embeddings=config.additional_vocab_size,
            embedding_dim=config.hidden_size,
            partially_freeze=config.freeze_text_layers,
            padding_idx=self.padding_idx,
        )

        self.image_size = config.vision_config.image_size
        self.vision_config = config.vision_config
        self.vision_model = IdeficsVisionTransformer(config.vision_config)

        # Perceiver Resampler
        if config.use_resampler:
            perceiver_config = config.perceiver_config
            self.perceiver_resampler = IdeficsPerceiverResampler(
                config,
                config.vision_config.embed_dim,
                perceiver_config.resampler_depth,
                perceiver_config.resampler_n_heads,
                perceiver_config.resampler_head_dim,
                perceiver_config.resampler_n_latents,
            )

        self.layers = nn.ModuleList(
            [IdeficsDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]
        )

        self.cross_layer_interval = config.cross_layer_interval
        num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
        self.gated_cross_attn_layers = nn.ModuleList(
            [IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)]
        )
        self.gradient_checkpointing = False

        self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

        self.freeze_relevant_params(config)

    def freeze_relevant_params(self, config=None):
        if config is None:
            config = self.config

        if config.freeze_text_layers:
            self.freeze_text_layers(config.freeze_text_module_exceptions)

        if config.freeze_vision_layers:
            freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)

    def freeze_text_layers(self, module_exceptions=[]):
        for module in [self.layers, self.norm]:
            freeze_model(module, module_exceptions=module_exceptions)

    def freeze_vision_layers(self, module_exceptions=[]):
        freeze_model(self.vision_model, module_exceptions=module_exceptions)

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value
  908. @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
  909. def forward(
  910. self,
  911. input_ids: torch.LongTensor = None,
  912. attention_mask: Optional[torch.Tensor] = None,
  913. position_ids: Optional[torch.LongTensor] = None,
  914. past_key_values: Optional[List[torch.FloatTensor]] = None,
  915. inputs_embeds: Optional[torch.FloatTensor] = None,
  916. pixel_values: Optional[torch.FloatTensor] = None,
  917. image_encoder_embeddings: Optional[torch.FloatTensor] = None,
  918. perceiver_embeddings: Optional[torch.FloatTensor] = None,
  919. image_attention_mask: Optional[torch.Tensor] = None,
  920. use_cache: Optional[bool] = None,
  921. output_attentions: Optional[bool] = None,
  922. output_hidden_states: Optional[bool] = None,
  923. interpolate_pos_encoding: Optional[bool] = False,
  924. return_dict: Optional[bool] = None,
  925. cache_position: Optional[torch.LongTensor] = None,
  926. ) -> Union[Tuple, IdeficsBaseModelOutputWithPast]:
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # kept for BC (non `Cache` `past_key_values` inputs)
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        batch_size, seq_length, _ = inputs_embeds.shape
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        seq_length_with_past = seq_length + past_key_values_length
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            position_ids = position_ids[:, -seq_length:]
        elif position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2:
            raise ValueError(
                "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None."
            )

        elif pixel_values is not None:
            pixel_values = pixel_values.to(dtype=self.dtype, device=device)  # fp16 compatibility
            batch_size, num_images = pixel_values.shape[:2]
            pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])

            # Get sequence from the vision encoder
            image_hidden_states = self.vision_model(
                pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
            ).last_hidden_state

        elif image_encoder_embeddings is not None:
            batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size()
            image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device)
            image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)

        if self.config.use_resampler:
            if perceiver_embeddings is None:
                perceiver_embeddings = self.perceiver_resampler(image_hidden_states)
                image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2)
            else:
                batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size()
            image_hidden_states = perceiver_embeddings
        elif perceiver_embeddings is None:
            image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2)
        else:
            raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True")

        image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
        # # Hack to use the model in full language modeling mode
        # image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)

        # Make image_attention_mask compatible with hidden states
        text_seq_len = image_attention_mask.size(1)
        image_attention_mask = image_attention_mask.unsqueeze(-1)
        image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
        image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)
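        # Note (added; illustrative shape trace of the image pathway above):
        #   pixel_values:          (batch_size, num_images, 3, H, W)
        #   vision encoder output: (batch_size * num_images, image_seq_len, image_hidden_size)
        #   after the optional perceiver resampler and the view above:
        #   image_hidden_states:   (batch_size, num_images * image_seq_len, image_hidden_size)
        #   image_attention_mask:  (batch_size, text_seq_len, num_images * image_seq_len)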
        if image_hidden_states is not None:
            image_batch_size, image_sequence_length, _ = image_hidden_states.size()
            image_hidden_shape = (image_batch_size, image_sequence_length)
            if image_attention_mask is None:
                image_attention_mask = torch.ones(image_hidden_shape, device=device)
            image_attention_mask = self.invert_attention_mask(image_attention_mask)
        else:
            image_attention_mask = None

        # cross_attention_gate:
        # For any tokens attending to no images, the hidden_states coming out of the cross-attention should be zeroed-out.
        # After inversion, `image_attention_mask` has shape [bsz, 1, seq_len, num_images * image_seq_len] with elements
        # equal to either 0.0 or a very negative number.
        # If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0.
        # `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0.
        cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to(
            device
        )
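        # Note (added; worked mini-example with illustrative values): with 2 image slots and image_seq_len == 1,
        # an inverted mask row of [0.0, <large negative>] for a token means it attends to the first image -> gate 1.0,
        # while [<large negative>, <large negative>] means it attends to no image -> gate 0.0, so its cross-attention
        # output is zeroed out inside the gated cross-attention layers.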
        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )
        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
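        # Note (added; illustrative): gated cross-attention is interleaved every `cross_layer_interval` decoder
        # layers, so with e.g. 32 decoder layers and an interval of 4 (hypothetical values), decoder layers
        # 0, 4, ..., 28 are each preceded by one of the 8 gated cross-attention blocks built in __init__.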
        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            def vblock(
                main_block,
                hidden_states,
                attention_mask,
                position_ids,
                past_key_value,
                image_hidden_states,
                image_attention_mask,
                cross_attention_gate,
                output_attentions,
                use_cache,
                layer_idx,
                cross_layer_interval,
                gated_cross_attn_layers,
                cache_position,
            ):
                # TODO(ls): Add cross attention values to respective lists
                if layer_idx % cross_layer_interval == 0:
                    xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
                    outputs = xblock(
                        hidden_states,
                        attention_mask=attention_mask,
                        image_hidden_states=image_hidden_states,
                        image_attention_mask=image_attention_mask,
                        cross_attention_gate=cross_attention_gate,
                        output_attentions=output_attentions,
                        use_cache=use_cache,
                        past_key_value=None,  # not implemented
                    )
                    hidden_states = outputs[0]

                layer_outputs = main_block(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                )

                return layer_outputs

            if self.gradient_checkpointing and self.training:
                past_key_values = None
                if use_cache:
                    logger.warning_once(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                layer_outputs = self._gradient_checkpointing_func(
                    vblock,
                    decoder_layer,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    image_hidden_states,
                    image_attention_mask,
                    cross_attention_gate,
                    output_attentions,
                    use_cache,
                    idx,
                    self.cross_layer_interval,
                    self.gated_cross_attn_layers,
                    cache_position,
                )
            else:
                layer_outputs = vblock(
                    decoder_layer,
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    image_hidden_states=image_hidden_states,
                    image_attention_mask=image_attention_mask,
                    cross_attention_gate=cross_attention_gate,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    layer_idx=idx,
                    cross_layer_interval=self.cross_layer_interval,
                    gated_cross_attn_layers=self.gated_cross_attn_layers,
                    cache_position=cache_position,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
                if v is not None
            )
        return IdeficsBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            image_hidden_states=image_hidden_states,
        )
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask
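    # Note (added; worked example with illustrative values, not from the upstream code): for the helper below with
    # sequence_length=3, target_length=5 and cache_position=[2, 3, 4], the (3, 5) base mask keeps 0.0 at key
    # positions j <= cache_position[i] and `torch.finfo(dtype).min` elsewhere, i.e. each of the 3 query tokens can
    # attend to all cached keys plus itself; a 2D padding mask, if given, is then folded in before the mask is
    # expanded to (batch_size, 1, 3, 5).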
    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
class IdeficsForVisionText2Text(IdeficsPreTrainedModel, GenerationMixin):
    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
    _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"]

    def __init__(self, config, vision_model=None):
        super().__init__(config)
        self.model = IdeficsModel(config)

        self.lm_head = IdeficsDecoupledLinear(
            in_features=config.hidden_size,
            out_features=config.vocab_size,
            out_additional_features=config.additional_vocab_size,
            bias=False,
            partially_freeze=config.freeze_lm_head,
        )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def tie_weights(self):
        """
        Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of
        IdeficsDecoupledLinear and IdeficsDecoupledEmbedding.
        """
        output_embeddings = self.get_output_embeddings()
        input_embeddings = self.get_input_embeddings()

        if getattr(self.config, "tie_word_embeddings", True):
            output_embeddings.weight = input_embeddings.weight
            if input_embeddings.num_additional_embeddings > 0:
                assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
                output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight

        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings
            if hasattr(output_embeddings, "out_additional_features") and hasattr(
                input_embeddings, "num_additional_embeddings"
            ):
                output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings
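    # Note (added): `IdeficsDecoupledEmbedding` / `IdeficsDecoupledLinear` keep the extra vocabulary
    # (`config.additional_vocab_size`, typically the image-related special tokens) in separate, optionally trainable
    # parameters. That is why `tie_weights` above ties both the regular weight and the additional
    # embedding/projection weight rather than a single matrix.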
    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_encoder_embeddings: Optional[torch.FloatTensor] = None,
        perceiver_embeddings: Optional[torch.FloatTensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, IdeficsCausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoProcessor, IdeficsForVisionText2Text

        >>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b")
        >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")

        >>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg"
        >>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg"

        >>> prompts = [
        ...     [
        ...         "User:",
        ...         dogs_image_url_1,
        ...         "Describe this image.\nAssistant: An image of two dogs.\n",
        ...         "User:",
        ...         dogs_image_url_2,
        ...         "Describe this image.\nAssistant:",
        ...     ]
        ... ]
        >>> inputs = processor(prompts, return_tensors="pt")
        >>> generate_ids = model.generate(**inputs, max_new_tokens=6)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            image_encoder_embeddings=image_encoder_embeddings,
            perceiver_embeddings=perceiver_embeddings,
            image_attention_mask=image_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict n
            if attention_mask is not None:
                # we use the input attention mask to shift the logits and labels, because it is 2D.
                # we also crop the attn mask in case it is longer, which happens in PrefixTuning with peft
                shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device)
                shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
                shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
            else:
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
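        # Note (added; illustrative): the shift above implements standard next-token prediction, e.g. for labels
        # [l0, l1, l2, l3] the logits at positions 0..2 are scored against [l1, l2, l3]. Padded positions
        # (attention_mask == 0) are dropped before the flattened cross-entropy, and -100 labels are additionally
        # ignored via CrossEntropyLoss's default ignore_index.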
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return IdeficsCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
    def prepare_inputs_for_generation(
        self,
        input_ids,
        attention_mask=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values=None,
        cache_position=None,
        pixel_values=None,
        image_hidden_states=None,
        image_attention_mask=None,
        use_cache=None,
        **kwargs,
    ):
        # Overwritten -- custom processing based on `config.use_resampler`

        model_inputs = {}
        if image_hidden_states is not None:
            if self.config.use_resampler:
                model_inputs["perceiver_embeddings"] = image_hidden_states
            else:
                model_inputs["image_encoder_embeddings"] = image_hidden_states
        else:
            model_inputs["pixel_values"] = pixel_values

        # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
        if past_key_values is not None:
            if inputs_embeds is not None:
                input_ids = input_ids[:, -cache_position.shape[0] :]
            elif input_ids.shape[1] != cache_position.shape[0]:
                input_ids = input_ids[:, cache_position]
            if image_attention_mask is not None:
                image_attention_mask = image_attention_mask[:, -input_ids.shape[1] :]

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

            # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
            # `mode="reduce-overhead"`, as otherwise the input `position_ids` would have varying strides during
            # decoding. Here, simply using `.contiguous()` is not sufficient, since in the batch size = 1 case
            # `position_ids` is already contiguous but with a varying stride, which retriggers a capture.
            position_ids = position_ids.clone(memory_format=torch.contiguous_format)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs.update({"inputs_embeds": inputs_embeds, "input_ids": None})
        else:
            # The clone here is for the same reason as for `position_ids`.
            model_inputs.update(
                {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
            )

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "cache_position": cache_position,
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "image_attention_mask": image_attention_mask,
                "interpolate_pos_encoding": kwargs.get("interpolate_pos_encoding", False),
            }
        )
        return model_inputs
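    # Note (added; illustrative generation flow, not part of the upstream code): on the first generation step
    # `pixel_values` is forwarded and the vision tower runs once; `_update_model_kwargs_for_generation` below then
    # stores `outputs.image_hidden_states`, so subsequent steps re-enter `prepare_inputs_for_generation` with
    # `image_hidden_states` set and route it to `perceiver_embeddings` or `image_encoder_embeddings` depending on
    # `config.use_resampler`, skipping the image encoder.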
    def _update_model_kwargs_for_generation(
        self,
        outputs: ModelOutput,
        model_kwargs: Dict[str, Any],
        is_encoder_decoder: bool = False,
        **kwargs,
    ) -> Dict[str, Any]:
        model_kwargs = super()._update_model_kwargs_for_generation(
            outputs,
            model_kwargs,
            is_encoder_decoder,
            **kwargs,
        )

        if "image_attention_mask" in model_kwargs:
            image_attention_mask = model_kwargs["image_attention_mask"]
            last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
            if model_kwargs.get("use_cache", True):
                model_kwargs["image_attention_mask"] = last_mask
            else:
                model_kwargs["image_attention_mask"] = torch.cat([image_attention_mask, last_mask], dim=1)

        # Get the precomputed image_hidden_states
        model_kwargs["image_hidden_states"] = outputs.image_hidden_states
        return model_kwargs
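    # Note (added): `_reorder_cache` below handles legacy tuple-of-tuples caches during beam search by selecting
    # the beam indices along the batch dimension of every cached key/value tensor.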
    @staticmethod
    def _reorder_cache(past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past