# coding=utf-8
# Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch mT5 model."""

import copy
import math
import os
import warnings
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache, StaticCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
    Seq2SeqQuestionAnsweringModelOutput,
    Seq2SeqSequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    DUMMY_INPUTS,
    DUMMY_MASK,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_torch_fx_proxy,
    is_torchdynamo_compiling,
    logging,
    replace_return_docstrings,
)
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MT5Config"
_CHECKPOINT_FOR_DOC = "mt5-small"
####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################

PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.

    Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
    it will evenly distribute blocks across all devices.

    Args:
        device_map (`Dict[int, list]`, *optional*):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the mt5 models have the
            following number of attention modules:

                - mt5-small: 6
                - mt5-base: 12
                - mt5-large: 24
                - mt5-xl: 24
                - mt5-xxl: 24

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using mt5-xl, which has a total of 24 attention modules:
    model = MT5ForConditionalGeneration.from_pretrained("mt5-xl")
    device_map = {
        0: [0, 1, 2],
        1: [3, 4, 5, 6, 7, 8, 9],
        2: [10, 11, 12, 13, 14, 15, 16],
        3: [17, 18, 19, 20, 21, 22, 23],
    }
    model.parallelize(device_map)
    ```
"""
DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to cpu from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with mt5-xl:
    model = MT5ForConditionalGeneration.from_pretrained("mt5-xl")
    device_map = {
        0: [0, 1, 2],
        1: [3, 4, 5, 6, 7, 8, 9],
        2: [10, 11, 12, 13, 14, 15, 16],
        3: [17, 18, 19, 20, 21, 22, 23],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""
# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->MT5
class MT5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the MT5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # MT5 uses a layer_norm which only scales and doesn't shift, also known as Root Mean
        # Square Layer Normalization (https://arxiv.org/abs/1910.07467). The variance is therefore
        # calculated without subtracting the mean and there is no bias. Additionally, we make sure
        # that the accumulation for half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
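

# Illustrative note (not part of the upstream file): MT5LayerNorm implements RMSNorm,
#     y = weight * x / sqrt(mean(x**2, dim=-1) + eps)
# i.e. no mean subtraction and no bias, with the squared mean accumulated in fp32 even for
# half-precision inputs. A minimal usage sketch, assuming a hidden size of 512:
#
#     norm = MT5LayerNorm(hidden_size=512, eps=1e-6)
#     x = torch.randn(2, 7, 512)
#     y = norm(x)  # same shape as x, normalized over the last dimension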
# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->MT5
class MT5DenseActDense(nn.Module):
    def __init__(self, config: MT5Config):
        super().__init__()
        self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_states = self.wi(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)
        hidden_states = self.wo(hidden_states)
        return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->MT5
class MT5DenseGatedActDense(nn.Module):
    def __init__(self, config: MT5Config):
        super().__init__()
        self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
        # See https://github.com/huggingface/transformers/issues/20287
        # We also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None`.
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)

        hidden_states = self.wo(hidden_states)
        return hidden_states
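

# Illustrative note (not part of the upstream file): the gated variant computes
#     h = act(wi_0(x)) * wi_1(x)   # gated activation, e.g. gated-GELU
#     y = wo(dropout(h))
# whereas MT5DenseActDense above computes y = wo(dropout(act(wi(x)))). Which variant is used
# is decided by `config.is_gated_act` in MT5LayerFF below.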
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->MT5
class MT5LayerFF(nn.Module):
    def __init__(self, config: MT5Config):
        super().__init__()
        if config.is_gated_act:
            self.DenseReluDense = MT5DenseGatedActDense(config)
        else:
            self.DenseReluDense = MT5DenseActDense(config)

        self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        forwarded_states = self.layer_norm(hidden_states)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
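

# Illustrative note (not part of the upstream file): MT5LayerFF is a pre-norm residual block,
#     y = x + dropout(FFN(RMSNorm(x)))
# The self-attention and cross-attention sub-layers below follow the same pattern.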
# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->MT5
class MT5Attention(nn.Module):
    def __init__(
        self,
        config: MT5Config,
        has_relative_attention_bias=False,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.key_value_proj_dim
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
                "will lead to errors during the forward call if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)

        if self.has_relative_attention_bias:
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()
        self.gradient_checkpointing = False

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
        )
        # Prune linear layers
        self.q = prune_linear_layer(self.q, index)
        self.k = prune_linear_layer(self.k, index)
        self.v = prune_linear_layer(self.v, index)
        self.o = prune_linear_layer(self.o, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.inner_dim = self.key_value_proj_dim * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on.

        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer

        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)

        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact

        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )

        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets
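
    # Illustrative worked example (not part of the upstream file), with the defaults
    # bidirectional=True, num_buckets=32, max_distance=128 (so 16 buckets per direction and
    # max_exact = 8):
    #   relative_position = +3  -> key is 3 tokens to the right -> bucket 16 + 3 = 19
    #   relative_position = -50 -> key is 50 tokens to the left -> logarithmic bin
    #                              8 + int(log(50 / 8) / log(128 / 8) * 8) = 13
    # Nearby offsets thus get their own bucket, while distant offsets share coarser bins.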
    def compute_bias(self, query_length, key_length, device=None, cache_position=None):
        """Compute binned relative position bias"""
        if device is None:
            device = self.relative_attention_bias.weight.device
        if cache_position is None:
            context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
        else:
            context_position = cache_position[:, None].to(device)
        memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
        relative_position = memory_position - context_position  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # shape (query_length, key_length)
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = self.relative_attention_bias(relative_position_bucket)  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
        return values
    def forward(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_value=None,
        layer_head_mask=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
        cache_position=None,
    ):
        """
        Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
        batch_size, seq_length = hidden_states.shape[:2]

        # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
        is_cross_attention = key_value_states is not None

        query_states = self.q(hidden_states)
        query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)

        if past_key_value is not None:
            is_updated = past_key_value.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # after the first generated id, we can subsequently re-use all key/value_states from cache
                curr_past_key_value = past_key_value.cross_attention_cache
            else:
                curr_past_key_value = past_key_value.self_attention_cache

        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_value is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_value.key_cache[self.layer_idx]
            value_states = curr_past_key_value.value_cache[self.layer_idx]
        else:
            key_states = self.k(current_states)
            value_states = self.v(current_states)
            key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
            value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)

            if past_key_value is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention:
                    past_key_value.is_updated[self.layer_idx] = True

        # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
        scores = torch.matmul(query_states, key_states.transpose(3, 2))

        if position_bias is None:
            key_length = key_states.shape[-2]
            # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
            real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
            if not self.has_relative_attention_bias:
                position_bias = torch.zeros(
                    (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
                )
                if self.gradient_checkpointing and self.training:
                    position_bias.requires_grad = True
            else:
                position_bias = self.compute_bias(
                    real_seq_length, key_length, device=scores.device, cache_position=cache_position
                )
                position_bias = position_bias[:, :, -seq_length:, :]

            if mask is not None:
                causal_mask = mask[:, :, :, : key_states.shape[-2]]
                position_bias = position_bias + causal_mask

        if self.pruned_heads:
            mask = torch.ones(position_bias.shape[1])
            mask[list(self.pruned_heads)] = 0
            position_bias_masked = position_bias[:, mask.bool()]
        else:
            position_bias_masked = position_bias

        scores += position_bias_masked

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask

        attn_output = torch.matmul(attn_weights, value_states)

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(batch_size, -1, self.inner_dim)
        attn_output = self.o(attn_output)

        outputs = (attn_output, past_key_value, position_bias)

        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
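

# Illustrative note (not part of the upstream file): unlike standard scaled dot-product
# attention, no 1/sqrt(d_kv) factor is applied to the attention scores in MT5Attention.forward
# above. T5/mT5 instead folds the scaling into the Mesh TensorFlow-style initialization of the
# query projection (see MT5PreTrainedModel._init_weights below, where q.weight is initialized
# with std proportional to (d_model * d_kv) ** -0.5).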
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->MT5
class MT5LayerSelfAttention(nn.Module):
    def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
        super().__init__()
        self.SelfAttention = MT5Attention(
            config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx
        )
        self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        cache_position=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            normed_hidden_states,
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0])
        outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->MT5
class MT5LayerCrossAttention(nn.Module):
    def __init__(self, config, layer_idx: Optional[int] = None):
        super().__init__()
        self.EncDecAttention = MT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
        self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
        cache_position=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            normed_hidden_states,
            mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        layer_output = hidden_states + self.dropout(attention_output[0])
        outputs = (layer_output,) + attention_output[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.t5.modeling_t5.T5Block with T5->MT5
class MT5Block(nn.Module):
    def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.layer = nn.ModuleList()
        self.layer.append(
            MT5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)
        )
        if self.is_decoder:
            self.layer.append(MT5LayerCrossAttention(config, layer_idx=layer_idx))

        self.layer.append(MT5LayerFF(config))

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        return_dict=True,
        cache_position=None,
    ):
        self_attention_outputs = self.layer[0](
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        hidden_states, past_key_value = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # Keep self-attention outputs and relative position weights

        # clamp inf values to enable fp16 training
        if hidden_states.dtype == torch.float16:
            clamp_value = torch.where(
                torch.isinf(hidden_states).any(),
                torch.finfo(hidden_states.dtype).max - 1000,
                torch.finfo(hidden_states.dtype).max,
            )
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        do_cross_attention = self.is_decoder and encoder_hidden_states is not None
        if do_cross_attention:
            cross_attention_outputs = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=past_key_value,
                query_length=cache_position[-1] + 1,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states, past_key_value = cross_attention_outputs[:2]

            # clamp inf values to enable fp16 training
            if hidden_states.dtype == torch.float16:
                clamp_value = torch.where(
                    torch.isinf(hidden_states).any(),
                    torch.finfo(hidden_states.dtype).max - 1000,
                    torch.finfo(hidden_states.dtype).max,
                )
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

            # Keep cross-attention outputs and relative position weights
            attention_outputs = attention_outputs + cross_attention_outputs[2:]

        # Apply Feed Forward layer
        hidden_states = self.layer[-1](hidden_states)

        # clamp inf values to enable fp16 training
        if hidden_states.dtype == torch.float16:
            clamp_value = torch.where(
                torch.isinf(hidden_states).any(),
                torch.finfo(hidden_states.dtype).max - 1000,
                torch.finfo(hidden_states.dtype).max,
            )
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if use_cache:
            outputs = outputs + (past_key_value,) + attention_outputs
        else:
            outputs = outputs + attention_outputs

        return outputs  # hidden-states, past_key_value, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
def load_tf_weights_in_mt5(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        tf_weights[name] = array

    for txt_name in names:
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        pointer = model
        array = tf_weights[txt_name]

        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] in ["kernel", "scale", "embedding"]:
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "self_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[0]
            elif scope_names[0] == "enc_dec_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[1]
            elif scope_names[0] == "dense_relu_dense":
                pointer = getattr(pointer, "layer")
                pointer = pointer[2]
            elif scope_names[0] == "rms_norm":
                if hasattr(pointer, "layer_norm"):
                    pointer = getattr(pointer, "layer_norm")
                elif hasattr(pointer, "final_layer_norm"):
                    pointer = getattr(pointer, "final_layer_norm")
            elif scope_names[0] == "scale":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            elif scope_names[0] == "decoder" and name[1] == "logits":
                continue
            elif scope_names[0] == "logits":
                pointer = getattr(pointer, "lm_head")
            elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
                pointer = getattr(pointer, f"wi_{scope_names[1]}")
                continue
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if scope_names[0] not in ["kernel", "scale", "embedding"]:
            pointer = getattr(pointer, "weight")
        if scope_names[0] != "embedding":
            logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    return model
# Copied from transformers.models.t5.modeling_t5.T5ClassificationHead with T5->MT5
class MT5ClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config: MT5Config):
        super().__init__()
        self.dense = nn.Linear(config.d_model, config.d_model)
        self.dropout = nn.Dropout(p=config.classifier_dropout)
        self.out_proj = nn.Linear(config.d_model, config.num_labels)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel with T5->MT5, t5->mt5
class MT5PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MT5Config
    load_tf_weights = load_tf_weights_in_mt5
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True
    _supports_quantized_cache = False  # not yet supported for encoder-decoder models
    _supports_static_cache = True
    _supports_cache_class = True
    _no_split_modules = ["MT5Block"]
    _keep_in_fp32_modules = ["wo"]

    @property
    def dummy_inputs(self):
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return dummy_inputs

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor  # Used for testing weights initialization
        if isinstance(module, MT5LayerNorm):
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(
            module,
            (MT5Model, MT5ForConditionalGeneration, MT5EncoderModel, MT5ForQuestionAnswering),
        ):
            # Mesh TensorFlow embeddings initialization
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
            if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
                module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
            if hasattr(module, "qa_outputs"):
                module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
                module.qa_outputs.bias.data.zero_()
        elif isinstance(module, MT5ForTokenClassification):
            if hasattr(module, "classifier"):
                module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0)
                module.classifier.bias.data.zero_()
        elif isinstance(module, MT5ClassificationHead):
            module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.dense, "bias") and module.dense.bias is not None:
                module.dense.bias.data.zero_()
            module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None:
                module.out_proj.bias.data.zero_()
        elif isinstance(module, MT5DenseActDense):
            # Mesh TensorFlow FF initialization
            # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
            # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
            module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi, "bias") and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, MT5DenseGatedActDense):
            module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
                module.wi_0.bias.data.zero_()
            module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, MT5Attention):
            # Mesh TensorFlow attention initialization to avoid scaling before softmax
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
            d_model = self.config.d_model
            key_value_proj_dim = self.config.d_kv
            n_heads = self.config.num_heads
            module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
            module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))

    def _shift_right(self, input_ids):
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id

        if decoder_start_token_id is None:
            raise ValueError(
                "self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id. "
                "See MT5 docs for more information."
            )

        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id

        if pad_token_id is None:
            raise ValueError("self.model.config.pad_token_id has to be defined.")
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

        return shifted_input_ids
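

# Illustrative worked example for `_shift_right` (not part of the upstream file), assuming the
# usual mT5 convention decoder_start_token_id = 0 and pad_token_id = 0:
#
#     labels                = [[13, 7, -100, -100]]
#     _shift_right(labels) -> [[ 0, 13,    7,    0]]
#
# The sequence is shifted one position to the right, the decoder start token is placed at
# position 0, the last label is dropped, and any remaining -100 (ignored-loss) entries are
# replaced with pad_token_id.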
# Copied from transformers.models.t5.modeling_t5.T5Stack with T5->MT5
class MT5Stack(MT5PreTrainedModel):
    def __init__(self, config, embed_tokens=None):
        super().__init__(config)

        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder

        self.block = nn.ModuleList(
            [MT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)]
        )
        self.final_layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

        # Initialize weights and apply final processing
        self.post_init()
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        warnings.warn(
            "`MT5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model"
            " with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0,"
            " 'block.1': 1, ...}",
            FutureWarning,
        )
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.block))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        # Load onto devices
        for k, v in self.device_map.items():
            for layer in v:
                cuda_device = "cuda:" + str(k)
                self.block[layer] = self.block[layer].to(cuda_device)
        # Set embed_tokens to first layer
        self.embed_tokens = self.embed_tokens.to(self.first_device)
        # Set final layer norm to last device
        self.final_layer_norm = self.final_layer_norm.to(self.last_device)

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        for i in range(len(self.block)):
            self.block[i] = self.block[i].to("cpu")
        self.embed_tokens = self.embed_tokens.to("cpu")
        self.final_layer_norm = self.final_layer_norm.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens = new_embeddings
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        cache_position=None,
    ):
        # Model parallel
        if self.model_parallel:
            torch.cuda.set_device(self.first_device)
            self.embed_tokens = self.embed_tokens.to(self.first_device)
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if inputs_embeds is None:
            if self.embed_tokens is None:
                raise ValueError("You have to initialize the model with valid token embeddings")
            inputs_embeds = self.embed_tokens(input_ids)

        batch_size, seq_length = input_shape

        if use_cache is True:
            if not self.is_decoder:
                raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")

        # initialize past_key_values
        return_legacy_cache = False
        return_self_attention_cache = False
        if self.is_decoder and (use_cache or past_key_values is not None):
            if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache):
                return_self_attention_cache = True
                past_key_values = EncoderDecoderCache(past_key_values, DynamicCache())
            elif not isinstance(past_key_values, EncoderDecoderCache):
                return_legacy_cache = True
                logger.warning_once(
                    "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. "
                    "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
                    "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
                )
                past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
            elif past_key_values is None:
                past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())
        elif not self.is_decoder:
            # do not pass cache object down the line for encoder stack
            # it messes indexing later in decoder-stack because cache object is modified in-place
            past_key_values = None

        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        if self.config.is_decoder:
            causal_mask = self._update_causal_mask(
                attention_mask,
                inputs_embeds,
                cache_position,
                past_key_values.self_attention_cache if past_key_values is not None else None,
                output_attentions,
            )
        elif attention_mask is not None:
            causal_mask = attention_mask[:, None, None, :]
            causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
            causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
        else:
            causal_mask = None

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(
                    encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long
                )
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and self.is_decoder) else None
        position_bias = None
        encoder_decoder_position_bias = None

        hidden_states = self.dropout(inputs_embeds)
  942. for i, layer_module in enumerate(self.block):
  943. layer_head_mask = head_mask[i]
  944. cross_attn_layer_head_mask = cross_attn_head_mask[i]
  945. # Model parallel
  946. if self.model_parallel:
  947. torch.cuda.set_device(hidden_states.device)
  948. # Ensure that attention_mask is always on the same device as hidden_states
  949. if causal_mask is not None:
  950. causal_mask = causal_mask.to(hidden_states.device)
  951. if position_bias is not None:
  952. position_bias = position_bias.to(hidden_states.device)
  953. if encoder_hidden_states is not None:
  954. encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
  955. if encoder_extended_attention_mask is not None:
  956. encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
  957. if encoder_decoder_position_bias is not None:
  958. encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
  959. if layer_head_mask is not None:
  960. layer_head_mask = layer_head_mask.to(hidden_states.device)
  961. if cross_attn_layer_head_mask is not None:
  962. cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
  963. if output_hidden_states:
  964. all_hidden_states = all_hidden_states + (hidden_states,)
  965. if self.gradient_checkpointing and self.training:
  966. layer_outputs = self._gradient_checkpointing_func(
  967. layer_module.forward,
  968. hidden_states,
  969. causal_mask,
  970. position_bias,
  971. encoder_hidden_states,
  972. encoder_extended_attention_mask,
  973. encoder_decoder_position_bias,
  974. layer_head_mask,
  975. cross_attn_layer_head_mask,
  976. None, # past_key_value is always None with gradient checkpointing
  977. use_cache,
  978. output_attentions,
  979. return_dict,
  980. cache_position,
  981. )
  982. else:
  983. layer_outputs = layer_module(
  984. hidden_states,
  985. attention_mask=causal_mask,
  986. position_bias=position_bias,
  987. encoder_hidden_states=encoder_hidden_states,
  988. encoder_attention_mask=encoder_extended_attention_mask,
  989. encoder_decoder_position_bias=encoder_decoder_position_bias,
  990. layer_head_mask=layer_head_mask,
  991. cross_attn_layer_head_mask=cross_attn_layer_head_mask,
  992. past_key_value=past_key_values,
  993. use_cache=use_cache,
  994. output_attentions=output_attentions,
  995. return_dict=return_dict,
  996. cache_position=cache_position,
  997. )
  998. # layer_outputs is a tuple with:
  999. # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
  1000. if use_cache is False:
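# When no cache is returned, pad the tuple with a None placeholder so the fixed indices used below
# still line up with the cached case.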
  1001. layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
  1002. hidden_states, next_decoder_cache = layer_outputs[:2]
1003. # We share the position biases between the layers - the first layer stores them
1004. # layer_outputs = hidden-states, key-value-states, (self-attention position bias), (self-attention weights),
1005. # (cross-attention position bias), (cross-attention weights)
  1006. position_bias = layer_outputs[2]
  1007. if self.is_decoder and encoder_hidden_states is not None:
  1008. encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
  1009. if output_attentions:
  1010. all_attentions = all_attentions + (layer_outputs[3],)
  1011. if self.is_decoder:
  1012. all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
  1013. # Model Parallel: If it's the last layer for that device, put things on the next device
  1014. if self.model_parallel:
  1015. for k, v in self.device_map.items():
  1016. if i == v[-1] and "cuda:" + str(k) != self.last_device:
  1017. hidden_states = hidden_states.to("cuda:" + str(k + 1))
  1018. hidden_states = self.final_layer_norm(hidden_states)
  1019. hidden_states = self.dropout(hidden_states)
  1020. # Add last layer
  1021. if output_hidden_states:
  1022. all_hidden_states = all_hidden_states + (hidden_states,)
  1023. next_cache = next_decoder_cache if use_cache else None
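# Optionally unwrap the cache into the format the caller originally provided (a plain self-attention
# cache or the legacy tuple-of-tuples format).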
  1024. if return_self_attention_cache:
  1025. next_cache = past_key_values.self_attention_cache
  1026. if return_legacy_cache:
  1027. next_cache = past_key_values.to_legacy_cache()
  1028. if not return_dict:
  1029. return tuple(
  1030. v
  1031. for v in [
  1032. hidden_states,
  1033. next_cache,
  1034. all_hidden_states,
  1035. all_attentions,
  1036. all_cross_attentions,
  1037. ]
  1038. if v is not None
  1039. )
  1040. return BaseModelOutputWithPastAndCrossAttentions(
  1041. last_hidden_state=hidden_states,
  1042. past_key_values=next_cache,
  1043. hidden_states=all_hidden_states,
  1044. attentions=all_attentions,
  1045. cross_attentions=all_cross_attentions,
  1046. )
  1047. # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
  1048. def _update_causal_mask(
  1049. self,
  1050. attention_mask: torch.Tensor,
  1051. input_tensor: torch.Tensor,
  1052. cache_position: torch.Tensor,
  1053. past_key_values: Cache,
  1054. output_attentions: bool,
  1055. ):
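# Returns None when the attention backend can enforce causality on its own (flash-attention-2 without
# padding, or SDPA when the mask can safely be ignored), the unchanged 2D mask for flash-attention-2
# with padding, and otherwise a 4D float mask of shape (batch_size, 1, query_length, key_value_length).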
  1056. if self.config._attn_implementation == "flash_attention_2":
  1057. if attention_mask is not None and 0.0 in attention_mask:
  1058. return attention_mask
  1059. return None
  1060. # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
  1061. # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
  1062. # to infer the attention mask.
  1063. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
  1064. using_static_cache = isinstance(past_key_values, StaticCache)
  1065. # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
  1066. if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
  1067. if AttentionMaskConverter._ignore_causal_mask_sdpa(
  1068. attention_mask,
  1069. inputs_embeds=input_tensor,
  1070. past_key_values_length=past_seen_tokens,
  1071. is_training=self.training,
  1072. ):
  1073. return None
  1074. dtype, device = input_tensor.dtype, input_tensor.device
  1075. sequence_length = input_tensor.shape[1]
  1076. if using_static_cache:
  1077. target_length = past_key_values.get_max_cache_shape()
  1078. else:
  1079. target_length = (
  1080. attention_mask.shape[-1]
  1081. if isinstance(attention_mask, torch.Tensor)
  1082. else past_seen_tokens + sequence_length + 1
  1083. )
  1084. # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
  1085. causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
  1086. attention_mask,
  1087. sequence_length=sequence_length,
  1088. target_length=target_length,
  1089. dtype=dtype,
  1090. device=device,
  1091. cache_position=cache_position,
  1092. batch_size=input_tensor.shape[0],
  1093. )
  1094. if (
  1095. self.config._attn_implementation == "sdpa"
  1096. and attention_mask is not None
  1097. and attention_mask.device.type == "cuda"
  1098. and not output_attentions
  1099. ):
  1100. # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
  1101. # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
  1102. # Details: https://github.com/pytorch/pytorch/issues/110213
  1103. min_dtype = torch.finfo(dtype).min
  1104. causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
  1105. return causal_mask
  1106. @staticmethod
  1107. # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position
  1108. def _prepare_4d_causal_attention_mask_with_cache_position(
  1109. attention_mask: torch.Tensor,
  1110. sequence_length: int,
  1111. target_length: int,
  1112. dtype: torch.dtype,
  1113. device: torch.device,
  1114. cache_position: torch.Tensor,
  1115. batch_size: int,
  1116. **kwargs,
  1117. ):
  1118. """
  1119. Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
  1120. `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
  1121. Args:
  1122. attention_mask (`torch.Tensor`):
  1123. A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
  1124. `(batch_size, 1, query_length, key_value_length)`.
  1125. sequence_length (`int`):
  1126. The sequence length being processed.
  1127. target_length (`int`):
  1128. The target length: when generating with static cache, the mask should be as long as the static cache,
1129. to account for the 0 padding (the part of the cache that is not filled yet).
  1130. dtype (`torch.dtype`):
  1131. The dtype to use for the 4D attention mask.
  1132. device (`torch.device`):
1133. The device to place the 4D attention mask on.
  1134. cache_position (`torch.Tensor`):
  1135. Indices depicting the position of the input sequence tokens in the sequence.
1136. batch_size (`int`):
  1137. Batch size.
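Example (a minimal illustrative sketch, not part of the public API; the shapes are chosen arbitrarily
and the static method is called through `MT5Stack`, the enclosing class):
```python
>>> import torch

>>> # 2 sequences, 3 new query tokens, key/value length 5 (2 cached tokens + 3 new ones)
>>> mask_2d = torch.tensor([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1]])
>>> causal_4d = MT5Stack._prepare_4d_causal_attention_mask_with_cache_position(
...     mask_2d,
...     sequence_length=3,
...     target_length=5,
...     dtype=torch.float32,
...     device=mask_2d.device,
...     cache_position=torch.arange(2, 5),
...     batch_size=2,
... )
>>> causal_4d.shape
torch.Size([2, 1, 3, 5])
```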
  1138. """
  1139. if attention_mask is not None and attention_mask.dim() == 4:
  1140. # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
  1141. causal_mask = attention_mask
  1142. else:
  1143. min_dtype = torch.finfo(dtype).min
  1144. causal_mask = torch.full(
  1145. (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
  1146. )
  1147. if sequence_length != 1:
  1148. causal_mask = torch.triu(causal_mask, diagonal=1)
  1149. causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
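# Key positions at or before each query's cache position are zeroed out (attendable); strictly-future
# positions keep the large negative fill value.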
  1150. causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
  1151. if attention_mask is not None:
  1152. causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
  1153. mask_length = attention_mask.shape[-1]
  1154. padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
  1155. padding_mask = padding_mask == 0
  1156. causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
  1157. padding_mask, min_dtype
  1158. )
  1159. return causal_mask
  1160. MT5_START_DOCSTRING = r"""
  1161. The MT5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text
  1162. Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan
1163. Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder-decoder transformer pre-trained in a
  1164. text-to-text denoising generative setting.
  1165. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1166. library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
  1167. etc.)
  1168. This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
  1169. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
  1170. and behavior.
  1171. Parameters:
  1172. config ([`MT5Config`]): Model configuration class with all the parameters of the model.
  1173. Initializing with a config file does not load the weights associated with the model, only the
  1174. configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
  1175. """
  1176. MT5_INPUTS_DOCSTRING = r"""
  1177. Args:
  1178. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
  1179. Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you
  1180. should be able to pad the inputs on both the right and the left.
  1181. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1182. [`PreTrainedTokenizer.__call__`] for details.
  1183. [What are input IDs?](../glossary#input-ids)
1184. To learn more about how to prepare `input_ids` for pretraining, take a look at [MT5 Training](./mt5#training).
  1185. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1186. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  1187. - 1 for tokens that are **not masked**,
  1188. - 0 for tokens that are **masked**.
  1189. [What are attention masks?](../glossary#attention-mask)
  1190. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
  1191. Indices of decoder input sequence tokens in the vocabulary.
  1192. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
  1193. [`PreTrainedTokenizer.__call__`] for details.
  1194. [What are decoder input IDs?](../glossary#decoder-input-ids)
  1195. MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
  1196. is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
  1197. To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5
  1198. Training](./mt5#training).
  1199. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
  1200. Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
  1201. be used by default.
  1202. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
  1203. Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
  1204. 1]`:
  1205. - 1 indicates the head is **not masked**,
  1206. - 0 indicates the head is **masked**.
  1207. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
  1208. Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
  1209. 1]`:
  1210. - 1 indicates the head is **not masked**,
  1211. - 0 indicates the head is **masked**.
  1212. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
  1213. Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
  1214. `[0, 1]`:
  1215. - 1 indicates the head is **not masked**,
  1216. - 0 indicates the head is **masked**.
1217. encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
  1218. Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
  1219. `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
  1220. the output of the last layer of the encoder. Used in the cross-attention of the decoder.
  1221. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
  1222. Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
  1223. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
  1224. don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
  1225. `decoder_input_ids` of shape `(batch_size, sequence_length)`.
  1226. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
  1227. Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
  1228. is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
  1229. model's internal embedding lookup matrix.
  1230. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
  1231. Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
  1232. representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
  1233. input (see `past_key_values`). This is useful if you want more control over how to convert
  1234. `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
  1235. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
  1236. of `inputs_embeds`.
  1237. use_cache (`bool`, *optional*):
  1238. If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
  1239. `past_key_values`).
  1240. output_attentions (`bool`, *optional*):
  1241. Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
  1242. tensors for more detail.
  1243. output_hidden_states (`bool`, *optional*):
  1244. Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
  1245. more detail.
  1246. return_dict (`bool`, *optional*):
  1247. Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
  1248. """
  1249. MT5_ENCODER_INPUTS_DOCSTRING = r"""
  1250. Args:
  1251. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
  1252. Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you
  1253. should be able to pad the inputs on both the right and the left.
  1254. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1255. [`PreTrainedTokenizer.__call__`] for details.
1256. To learn more about how to prepare `input_ids` for pretraining, take a look at [MT5 Training](./mt5#training).
  1257. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1258. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  1259. - 1 for tokens that are **not masked**,
  1260. - 0 for tokens that are **masked**.
  1261. [What are attention masks?](../glossary#attention-mask)
  1262. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
  1263. Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
  1264. - 1 indicates the head is **not masked**,
  1265. - 0 indicates the head is **masked**.
  1266. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
  1267. Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
  1268. is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
  1269. model's internal embedding lookup matrix.
  1270. output_attentions (`bool`, *optional*):
  1271. Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
  1272. tensors for more detail.
  1273. output_hidden_states (`bool`, *optional*):
  1274. Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
  1275. more detail.
  1276. return_dict (`bool`, *optional*):
  1277. Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
  1278. """
  1279. # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
  1280. __HEAD_MASK_WARNING_MSG = """
  1281. The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
  1282. `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
  1283. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
  1284. num_heads)`.
  1285. """
  1286. @add_start_docstrings(
  1287. "The bare MT5 Model transformer outputting raw hidden-states without any specific head on top.",
  1288. MT5_START_DOCSTRING,
  1289. )
  1290. class MT5Model(MT5PreTrainedModel):
  1291. r"""
  1292. Examples:
  1293. ```python
  1294. >>> from transformers import MT5Model, AutoTokenizer
  1295. >>> model = MT5Model.from_pretrained("google/mt5-small")
  1296. >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
  1297. >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
  1298. >>> summary = "Weiter Verhandlung in Syrien."
  1299. >>> inputs = tokenizer(article, return_tensors="pt")
  1300. >>> labels = tokenizer(text_target=summary, return_tensors="pt")
  1301. >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"])
  1302. >>> hidden_states = outputs.last_hidden_state
  1303. ```"""
  1304. model_type = "mt5"
  1305. config_class = MT5Config
  1306. _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
  1307. _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
  1308. # Copied from transformers.models.t5.modeling_t5.T5Model.__init__ with T5->MT5
  1309. def __init__(self, config: MT5Config):
  1310. super().__init__(config)
  1311. self.shared = nn.Embedding(config.vocab_size, config.d_model)
  1312. encoder_config = copy.deepcopy(config)
  1313. encoder_config.is_decoder = False
  1314. encoder_config.use_cache = False
  1315. encoder_config.is_encoder_decoder = False
  1316. self.encoder = MT5Stack(encoder_config, self.shared)
  1317. decoder_config = copy.deepcopy(config)
  1318. decoder_config.is_decoder = True
  1319. decoder_config.is_encoder_decoder = False
  1320. decoder_config.num_layers = config.num_decoder_layers
  1321. self.decoder = MT5Stack(decoder_config, self.shared)
  1322. # Initialize weights and apply final processing
  1323. self.post_init()
  1324. # Model parallel
  1325. self.model_parallel = False
  1326. self.device_map = None
  1327. @add_start_docstrings(PARALLELIZE_DOCSTRING)
  1328. # Copied from transformers.models.t5.modeling_t5.T5Model.parallelize
  1329. def parallelize(self, device_map=None):
  1330. warnings.warn(
  1331. "`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model"
  1332. " with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
  1333. " `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0':"
  1334. " 0, 'encoder.block.1': 1, ...}",
  1335. FutureWarning,
  1336. )
  1337. self.device_map = (
  1338. get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
  1339. if device_map is None
  1340. else device_map
  1341. )
  1342. assert_device_map(self.device_map, len(self.encoder.block))
  1343. self.encoder.parallelize(self.device_map)
  1344. self.decoder.parallelize(self.device_map)
  1345. self.model_parallel = True
  1346. @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
  1347. # Copied from transformers.models.t5.modeling_t5.T5Model.deparallelize
  1348. def deparallelize(self):
  1349. warnings.warn(
  1350. "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
  1351. FutureWarning,
  1352. )
  1353. self.encoder.deparallelize()
  1354. self.decoder.deparallelize()
  1355. self.encoder = self.encoder.to("cpu")
  1356. self.decoder = self.decoder.to("cpu")
  1357. self.model_parallel = False
  1358. self.device_map = None
  1359. torch.cuda.empty_cache()
  1360. # Copied from transformers.models.t5.modeling_t5.T5Model.get_input_embeddings
  1361. def get_input_embeddings(self):
  1362. return self.shared
  1363. # Copied from transformers.models.t5.modeling_t5.T5Model.set_input_embeddings
  1364. def set_input_embeddings(self, new_embeddings):
  1365. self.shared = new_embeddings
  1366. self.encoder.set_input_embeddings(new_embeddings)
  1367. self.decoder.set_input_embeddings(new_embeddings)
  1368. # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder
  1369. def get_encoder(self):
  1370. return self.encoder
  1371. # Copied from transformers.models.t5.modeling_t5.T5Model.get_decoder
  1372. def get_decoder(self):
  1373. return self.decoder
  1374. # Copied from transformers.models.t5.modeling_t5.T5Model._prune_heads
  1375. def _prune_heads(self, heads_to_prune):
  1376. """
1377. Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the base
1378. class `PreTrainedModel`.
  1379. """
  1380. for layer, heads in heads_to_prune.items():
1381. self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
  1382. @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
  1383. @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
  1384. # Copied from transformers.models.t5.modeling_t5.T5Model.forward with google-t5/->google/, T5->MT5, t5->mt5
  1385. def forward(
  1386. self,
  1387. input_ids: Optional[torch.LongTensor] = None,
  1388. attention_mask: Optional[torch.FloatTensor] = None,
  1389. decoder_input_ids: Optional[torch.LongTensor] = None,
  1390. decoder_attention_mask: Optional[torch.BoolTensor] = None,
  1391. head_mask: Optional[torch.FloatTensor] = None,
  1392. decoder_head_mask: Optional[torch.FloatTensor] = None,
  1393. cross_attn_head_mask: Optional[torch.Tensor] = None,
  1394. encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
  1395. past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
  1396. inputs_embeds: Optional[torch.Tensor] = None,
  1397. decoder_inputs_embeds: Optional[torch.Tensor] = None,
  1398. use_cache: Optional[bool] = None,
  1399. output_attentions: Optional[bool] = None,
  1400. output_hidden_states: Optional[bool] = None,
  1401. return_dict: Optional[bool] = None,
  1402. cache_position: Optional[torch.LongTensor] = None,
  1403. ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
  1404. r"""
  1405. Returns:
  1406. Example:
  1407. ```python
  1408. >>> from transformers import AutoTokenizer, MT5Model
  1409. >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
  1410. >>> model = MT5Model.from_pretrained("google/mt5-small")
  1411. >>> input_ids = tokenizer(
  1412. ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
  1413. ... ).input_ids # Batch size 1
  1414. >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
1415. >>> # preprocess: Prepend decoder_input_ids with the start token, which is the pad token for MT5Model.
1416. >>> # This is not needed for torch's MT5ForConditionalGeneration as it does this internally using the labels arg.
  1417. >>> decoder_input_ids = model._shift_right(decoder_input_ids)
  1418. >>> # forward pass
  1419. >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
  1420. >>> last_hidden_states = outputs.last_hidden_state
  1421. ```"""
  1422. use_cache = use_cache if use_cache is not None else self.config.use_cache
  1423. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1424. # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
  1425. if head_mask is not None and decoder_head_mask is None:
  1426. if self.config.num_layers == self.config.num_decoder_layers:
  1427. warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
  1428. decoder_head_mask = head_mask
  1429. # Encode if needed (training, first prediction pass)
  1430. if encoder_outputs is None:
  1431. encoder_outputs = self.encoder(
  1432. input_ids=input_ids,
  1433. attention_mask=attention_mask,
  1434. inputs_embeds=inputs_embeds,
  1435. head_mask=head_mask,
  1436. output_attentions=output_attentions,
  1437. output_hidden_states=output_hidden_states,
  1438. return_dict=return_dict,
  1439. )
  1440. elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
  1441. encoder_outputs = BaseModelOutput(
  1442. last_hidden_state=encoder_outputs[0],
  1443. hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
  1444. attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
  1445. )
  1446. hidden_states = encoder_outputs[0]
  1447. # Set device for model parallelism
  1448. if self.model_parallel:
  1449. torch.cuda.set_device(self.decoder.first_device)
  1450. hidden_states = hidden_states.to(self.decoder.first_device)
  1451. if decoder_input_ids is not None:
  1452. decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
  1453. if attention_mask is not None:
  1454. attention_mask = attention_mask.to(self.decoder.first_device)
  1455. if decoder_attention_mask is not None:
  1456. decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
  1457. # Decode
  1458. decoder_outputs = self.decoder(
  1459. input_ids=decoder_input_ids,
  1460. attention_mask=decoder_attention_mask,
  1461. inputs_embeds=decoder_inputs_embeds,
  1462. past_key_values=past_key_values,
  1463. encoder_hidden_states=hidden_states,
  1464. encoder_attention_mask=attention_mask,
  1465. head_mask=decoder_head_mask,
  1466. cross_attn_head_mask=cross_attn_head_mask,
  1467. use_cache=use_cache,
  1468. output_attentions=output_attentions,
  1469. output_hidden_states=output_hidden_states,
  1470. return_dict=return_dict,
  1471. cache_position=cache_position,
  1472. )
  1473. if not return_dict:
  1474. return decoder_outputs + encoder_outputs
  1475. return Seq2SeqModelOutput(
  1476. last_hidden_state=decoder_outputs.last_hidden_state,
  1477. past_key_values=decoder_outputs.past_key_values,
  1478. decoder_hidden_states=decoder_outputs.hidden_states,
  1479. decoder_attentions=decoder_outputs.attentions,
  1480. cross_attentions=decoder_outputs.cross_attentions,
  1481. encoder_last_hidden_state=encoder_outputs.last_hidden_state,
  1482. encoder_hidden_states=encoder_outputs.hidden_states,
  1483. encoder_attentions=encoder_outputs.attentions,
  1484. )
  1485. @add_start_docstrings("""MT5 Model with a `language modeling` head on top.""", MT5_START_DOCSTRING)
  1486. class MT5ForConditionalGeneration(MT5PreTrainedModel, GenerationMixin):
  1487. r"""
  1488. Examples:
  1489. ```python
  1490. >>> from transformers import MT5ForConditionalGeneration, AutoTokenizer
  1491. >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")
  1492. >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
  1493. >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
  1494. >>> summary = "Weiter Verhandlung in Syrien."
  1495. >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt")
  1496. >>> outputs = model(**inputs)
  1497. >>> loss = outputs.loss
  1498. ```"""
  1499. model_type = "mt5"
  1500. config_class = MT5Config
  1501. _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
  1502. _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
  1503. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.__init__ with T5->MT5
  1504. def __init__(self, config: MT5Config):
  1505. super().__init__(config)
  1506. self.model_dim = config.d_model
  1507. self.shared = nn.Embedding(config.vocab_size, config.d_model)
  1508. encoder_config = copy.deepcopy(config)
  1509. encoder_config.is_decoder = False
  1510. encoder_config.use_cache = False
  1511. encoder_config.is_encoder_decoder = False
  1512. self.encoder = MT5Stack(encoder_config, self.shared)
  1513. decoder_config = copy.deepcopy(config)
  1514. decoder_config.is_decoder = True
  1515. decoder_config.is_encoder_decoder = False
  1516. decoder_config.num_layers = config.num_decoder_layers
  1517. self.decoder = MT5Stack(decoder_config, self.shared)
  1518. self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
  1519. # Initialize weights and apply final processing
  1520. self.post_init()
  1521. # Model parallel
  1522. self.model_parallel = False
  1523. self.device_map = None
  1524. @add_start_docstrings(PARALLELIZE_DOCSTRING)
  1525. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.parallelize
  1526. def parallelize(self, device_map=None):
  1527. warnings.warn(
  1528. "`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you"
  1529. " should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also"
  1530. " provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance"
  1531. " {'encoder.block.0': 0, 'encoder.block.1': 1, ...}",
  1532. FutureWarning,
  1533. )
  1534. self.device_map = (
  1535. get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
  1536. if device_map is None
  1537. else device_map
  1538. )
  1539. assert_device_map(self.device_map, len(self.encoder.block))
  1540. self.encoder.parallelize(self.device_map)
  1541. self.decoder.parallelize(self.device_map)
  1542. self.lm_head = self.lm_head.to(self.decoder.first_device)
  1543. self.model_parallel = True
  1544. @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
  1545. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.deparallelize
  1546. def deparallelize(self):
  1547. warnings.warn(
  1548. "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
  1549. FutureWarning,
  1550. )
  1551. self.encoder.deparallelize()
  1552. self.decoder.deparallelize()
  1553. self.encoder = self.encoder.to("cpu")
  1554. self.decoder = self.decoder.to("cpu")
  1555. self.lm_head = self.lm_head.to("cpu")
  1556. self.model_parallel = False
  1557. self.device_map = None
  1558. torch.cuda.empty_cache()
  1559. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings
  1560. def get_input_embeddings(self):
  1561. return self.shared
  1562. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings
  1563. def set_input_embeddings(self, new_embeddings):
  1564. self.shared = new_embeddings
  1565. self.encoder.set_input_embeddings(new_embeddings)
  1566. self.decoder.set_input_embeddings(new_embeddings)
  1567. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings
  1568. def set_output_embeddings(self, new_embeddings):
  1569. self.lm_head = new_embeddings
  1570. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_output_embeddings
  1571. def get_output_embeddings(self):
  1572. return self.lm_head
  1573. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_encoder
  1574. def get_encoder(self):
  1575. return self.encoder
  1576. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_decoder
  1577. def get_decoder(self):
  1578. return self.decoder
  1579. @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
  1580. @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
  1581. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.forward with google-t5/->google/, T5->MT5, t5->mt5
  1582. def forward(
  1583. self,
  1584. input_ids: Optional[torch.LongTensor] = None,
  1585. attention_mask: Optional[torch.FloatTensor] = None,
  1586. decoder_input_ids: Optional[torch.LongTensor] = None,
  1587. decoder_attention_mask: Optional[torch.BoolTensor] = None,
  1588. head_mask: Optional[torch.FloatTensor] = None,
  1589. decoder_head_mask: Optional[torch.FloatTensor] = None,
  1590. cross_attn_head_mask: Optional[torch.Tensor] = None,
  1591. encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
  1592. past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
  1593. inputs_embeds: Optional[torch.FloatTensor] = None,
  1594. decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
  1595. labels: Optional[torch.LongTensor] = None,
  1596. use_cache: Optional[bool] = None,
  1597. output_attentions: Optional[bool] = None,
  1598. output_hidden_states: Optional[bool] = None,
  1599. return_dict: Optional[bool] = None,
  1600. cache_position: Optional[torch.LongTensor] = None,
  1601. ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
  1602. r"""
1603. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1604. Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
1605. config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
1606. labels in `[0, ..., config.vocab_size - 1]`.
  1607. Returns:
  1608. Examples:
  1609. ```python
  1610. >>> from transformers import AutoTokenizer, MT5ForConditionalGeneration
  1611. >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
  1612. >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")
  1613. >>> # training
  1614. >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
  1615. >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
  1616. >>> outputs = model(input_ids=input_ids, labels=labels)
  1617. >>> loss = outputs.loss
  1618. >>> logits = outputs.logits
  1619. >>> # inference
  1620. >>> input_ids = tokenizer(
  1621. ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
  1622. ... ).input_ids # Batch size 1
  1623. >>> outputs = model.generate(input_ids)
  1624. >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
  1625. >>> # studies have shown that owning a dog is good for you.
  1626. ```"""
  1627. use_cache = use_cache if use_cache is not None else self.config.use_cache
  1628. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1629. # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
  1630. if head_mask is not None and decoder_head_mask is None:
  1631. if self.config.num_layers == self.config.num_decoder_layers:
  1632. warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
  1633. decoder_head_mask = head_mask
  1634. # Encode if needed (training, first prediction pass)
  1635. if encoder_outputs is None:
  1636. # Convert encoder inputs in embeddings if needed
  1637. encoder_outputs = self.encoder(
  1638. input_ids=input_ids,
  1639. attention_mask=attention_mask,
  1640. inputs_embeds=inputs_embeds,
  1641. head_mask=head_mask,
  1642. output_attentions=output_attentions,
  1643. output_hidden_states=output_hidden_states,
  1644. return_dict=return_dict,
  1645. )
  1646. elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
  1647. encoder_outputs = BaseModelOutput(
  1648. last_hidden_state=encoder_outputs[0],
  1649. hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
  1650. attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
  1651. )
  1652. hidden_states = encoder_outputs[0]
  1653. if self.model_parallel:
  1654. torch.cuda.set_device(self.decoder.first_device)
  1655. if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
  1656. # get decoder inputs from shifting lm labels to the right
  1657. decoder_input_ids = self._shift_right(labels)
  1658. # Set device for model parallelism
  1659. if self.model_parallel:
  1660. torch.cuda.set_device(self.decoder.first_device)
  1661. hidden_states = hidden_states.to(self.decoder.first_device)
  1662. if decoder_input_ids is not None:
  1663. decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
  1664. if attention_mask is not None:
  1665. attention_mask = attention_mask.to(self.decoder.first_device)
  1666. if decoder_attention_mask is not None:
  1667. decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
  1668. # Decode
  1669. decoder_outputs = self.decoder(
  1670. input_ids=decoder_input_ids,
  1671. attention_mask=decoder_attention_mask,
  1672. inputs_embeds=decoder_inputs_embeds,
  1673. past_key_values=past_key_values,
  1674. encoder_hidden_states=hidden_states,
  1675. encoder_attention_mask=attention_mask,
  1676. head_mask=decoder_head_mask,
  1677. cross_attn_head_mask=cross_attn_head_mask,
  1678. use_cache=use_cache,
  1679. output_attentions=output_attentions,
  1680. output_hidden_states=output_hidden_states,
  1681. return_dict=return_dict,
  1682. cache_position=cache_position,
  1683. )
  1684. sequence_output = decoder_outputs[0]
  1685. # Set device for model parallelism
  1686. if self.model_parallel:
  1687. torch.cuda.set_device(self.encoder.first_device)
  1688. self.lm_head = self.lm_head.to(self.encoder.first_device)
  1689. sequence_output = sequence_output.to(self.lm_head.weight.device)
  1690. if self.config.tie_word_embeddings:
  1691. # Rescale output before projecting on vocab
  1692. # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
  1693. sequence_output = sequence_output * (self.model_dim**-0.5)
  1694. lm_logits = self.lm_head(sequence_output)
  1695. loss = None
  1696. if labels is not None:
  1697. loss_fct = CrossEntropyLoss(ignore_index=-100)
  1698. # move labels to correct device to enable PP
  1699. labels = labels.to(lm_logits.device)
  1700. loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
  1701. # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
  1702. if not return_dict:
  1703. output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
  1704. return ((loss,) + output) if loss is not None else output
  1705. return Seq2SeqLMOutput(
  1706. loss=loss,
  1707. logits=lm_logits,
  1708. past_key_values=decoder_outputs.past_key_values,
  1709. decoder_hidden_states=decoder_outputs.hidden_states,
  1710. decoder_attentions=decoder_outputs.attentions,
  1711. cross_attentions=decoder_outputs.cross_attentions,
  1712. encoder_last_hidden_state=encoder_outputs.last_hidden_state,
  1713. encoder_hidden_states=encoder_outputs.hidden_states,
  1714. encoder_attentions=encoder_outputs.attentions,
  1715. )
  1716. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels
  1717. def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
  1718. return self._shift_right(labels)
  1719. # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._reorder_cache
  1720. def _reorder_cache(self, past_key_values, beam_idx):
1721. # if the decoder past is not included in the output
1722. # speedy decoding is disabled and there is no need to reorder
  1723. if past_key_values is None:
  1724. logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
  1725. return past_key_values
  1726. reordered_decoder_past = ()
  1727. for layer_past_states in past_key_values:
  1728. # get the correct batch idx from layer past batch dim
  1729. # batch dim of `past` is at 2nd position
  1730. reordered_layer_past_states = ()
  1731. for layer_past_state in layer_past_states:
  1732. # need to set correct `past` for each of the four key / value states
  1733. reordered_layer_past_states = reordered_layer_past_states + (
  1734. layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
  1735. )
  1736. if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
  1737. raise ValueError(
  1738. f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
  1739. )
  1740. if len(reordered_layer_past_states) != len(layer_past_states):
  1741. raise ValueError(
  1742. f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched"
  1743. )
  1744. reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
  1745. return reordered_decoder_past
  1746. @add_start_docstrings(
  1747. "The bare MT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
  1748. MT5_START_DOCSTRING,
  1749. )
  1750. class MT5EncoderModel(MT5PreTrainedModel):
  1751. r"""
  1752. Examples:
  1753. ```python
  1754. >>> from transformers import MT5EncoderModel, AutoTokenizer
  1755. >>> model = MT5EncoderModel.from_pretrained("google/mt5-small")
  1756. >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
  1757. >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
  1758. >>> input_ids = tokenizer(article, return_tensors="pt").input_ids
  1759. >>> outputs = model(input_ids)
  1760. >>> hidden_state = outputs.last_hidden_state
  1761. ```"""
  1762. model_type = "mt5"
  1763. config_class = MT5Config
  1764. _tied_weights_keys = ["encoder.embed_tokens.weight"]
  1765. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.__init__ with T5->MT5
  1766. def __init__(self, config: MT5Config):
  1767. super().__init__(config)
  1768. self.shared = nn.Embedding(config.vocab_size, config.d_model)
  1769. encoder_config = copy.deepcopy(config)
  1770. encoder_config.use_cache = False
  1771. encoder_config.is_encoder_decoder = False
  1772. self.encoder = MT5Stack(encoder_config, self.shared)
  1773. # Initialize weights and apply final processing
  1774. self.post_init()
  1775. # Model parallel
  1776. self.model_parallel = False
  1777. self.device_map = None
  1778. @add_start_docstrings(PARALLELIZE_DOCSTRING)
  1779. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.parallelize
  1780. def parallelize(self, device_map=None):
  1781. warnings.warn(
  1782. "`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
  1783. " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
  1784. " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0,"
  1785. " 'block.1': 1, ...}",
  1786. FutureWarning,
  1787. )
  1788. self.device_map = (
  1789. get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
  1790. if device_map is None
  1791. else device_map
  1792. )
  1793. assert_device_map(self.device_map, len(self.encoder.block))
  1794. self.encoder.parallelize(self.device_map)
  1795. self.model_parallel = True
  1796. @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
  1797. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.deparallelize
  1798. def deparallelize(self):
  1799. warnings.warn(
  1800. "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
  1801. FutureWarning,
  1802. )
  1803. self.encoder.deparallelize()
  1804. self.encoder = self.encoder.to("cpu")
  1805. self.model_parallel = False
  1806. self.device_map = None
  1807. torch.cuda.empty_cache()
  1808. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings
  1809. def get_input_embeddings(self):
  1810. return self.shared
  1811. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings
  1812. def set_input_embeddings(self, new_embeddings):
  1813. self.shared = new_embeddings
  1814. self.encoder.set_input_embeddings(new_embeddings)
  1815. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder
  1816. def get_encoder(self):
  1817. return self.encoder
  1818. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._prune_heads
  1819. def _prune_heads(self, heads_to_prune):
  1820. """
1821. Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the base
1822. class `PreTrainedModel`.
  1823. """
  1824. for layer, heads in heads_to_prune.items():
  1825. self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
  1826. @add_start_docstrings_to_model_forward(MT5_ENCODER_INPUTS_DOCSTRING)
  1827. @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
  1828. # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with google-t5/->google/, T5->MT5, t5->mt5
  1829. def forward(
  1830. self,
  1831. input_ids: Optional[torch.LongTensor] = None,
  1832. attention_mask: Optional[torch.FloatTensor] = None,
  1833. head_mask: Optional[torch.FloatTensor] = None,
  1834. inputs_embeds: Optional[torch.FloatTensor] = None,
  1835. output_attentions: Optional[bool] = None,
  1836. output_hidden_states: Optional[bool] = None,
  1837. return_dict: Optional[bool] = None,
  1838. ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
  1839. r"""
  1840. Returns:
  1841. Example:
  1842. ```python
  1843. >>> from transformers import AutoTokenizer, MT5EncoderModel
  1844. >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
  1845. >>> model = MT5EncoderModel.from_pretrained("google/mt5-small")
  1846. >>> input_ids = tokenizer(
  1847. ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
  1848. ... ).input_ids # Batch size 1
  1849. >>> outputs = model(input_ids=input_ids)
  1850. >>> last_hidden_states = outputs.last_hidden_state
  1851. ```"""
  1852. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1853. encoder_outputs = self.encoder(
  1854. input_ids=input_ids,
  1855. attention_mask=attention_mask,
  1856. inputs_embeds=inputs_embeds,
  1857. head_mask=head_mask,
  1858. output_attentions=output_attentions,
  1859. output_hidden_states=output_hidden_states,
  1860. return_dict=return_dict,
  1861. )
  1862. return encoder_outputs
  1863. @add_start_docstrings(
  1864. """
1865. MT5 model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
  1866. tasks.
  1867. """,
  1868. MT5_START_DOCSTRING,
  1869. )
  1870. class MT5ForSequenceClassification(MT5PreTrainedModel):
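r"""
Examples (an illustrative sketch only: `google/mt5-small` is a pretraining checkpoint, so the
classification head below is randomly initialized and `num_labels=2` is an arbitrary choice):
```python
>>> import torch
>>> from transformers import AutoTokenizer, MT5ForSequenceClassification
>>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
>>> model = MT5ForSequenceClassification.from_pretrained("google/mt5-small", num_labels=2)
>>> inputs = tokenizer("UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.", return_tensors="pt")
>>> outputs = model(**inputs, labels=torch.tensor([1]))
>>> loss, logits = outputs.loss, outputs.logits
```"""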
  1871. _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
  1872. _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
  1873. # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->MT5
  1874. def __init__(self, config: MT5Config):
  1875. super().__init__(config)
  1876. self.transformer = MT5Model(config)
  1877. self.classification_head = MT5ClassificationHead(config)
  1878. # Initialize weights and apply final processing
  1879. self.post_init()
  1880. self.model_parallel = False
  1881. @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
  1882. @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
  1883. # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.forward
  1884. def forward(
  1885. self,
  1886. input_ids: torch.LongTensor = None,
  1887. attention_mask: Optional[torch.Tensor] = None,
  1888. decoder_input_ids: Optional[torch.LongTensor] = None,
  1889. decoder_attention_mask: Optional[torch.LongTensor] = None,
  1890. head_mask: Optional[torch.Tensor] = None,
  1891. decoder_head_mask: Optional[torch.Tensor] = None,
  1892. cross_attn_head_mask: Optional[torch.Tensor] = None,
  1893. encoder_outputs: Optional[List[torch.FloatTensor]] = None,
  1894. inputs_embeds: Optional[torch.FloatTensor] = None,
  1895. decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
  1896. labels: Optional[torch.LongTensor] = None,
  1897. use_cache: Optional[bool] = None,
  1898. output_attentions: Optional[bool] = None,
  1899. output_hidden_states: Optional[bool] = None,
  1900. return_dict: Optional[bool] = None,
  1901. ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
  1902. r"""
  1903. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1904. Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
  1905. config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  1906. Returns:
  1907. """
  1908. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1909. if labels is not None:
  1910. use_cache = False
  1911. if input_ids is None and inputs_embeds is not None:
  1912. raise NotImplementedError(
  1913. f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
  1914. )
1915. # Copied from models.bart.modeling_bart.BartModel.forward. Differently from other models, T5 automatically creates
1916. # decoder_input_ids from input_ids if no decoder_input_ids are provided
  1917. if decoder_input_ids is None and decoder_inputs_embeds is None:
  1918. if input_ids is None:
  1919. raise ValueError(
  1920. "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
  1921. "passed, `input_ids` cannot be `None`. Please pass either "
  1922. "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
  1923. )
  1924. decoder_input_ids = self._shift_right(input_ids)
  1925. outputs = self.transformer(
  1926. input_ids,
  1927. attention_mask=attention_mask,
  1928. decoder_input_ids=decoder_input_ids,
  1929. decoder_attention_mask=decoder_attention_mask,
  1930. head_mask=head_mask,
  1931. decoder_head_mask=decoder_head_mask,
  1932. cross_attn_head_mask=cross_attn_head_mask,
  1933. encoder_outputs=encoder_outputs,
  1934. inputs_embeds=inputs_embeds,
  1935. decoder_inputs_embeds=decoder_inputs_embeds,
  1936. use_cache=use_cache,
  1937. output_attentions=output_attentions,
  1938. output_hidden_states=output_hidden_states,
  1939. return_dict=return_dict,
  1940. )
  1941. sequence_output = outputs[0]
  1942. eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device)
  1943. if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
  1944. raise ValueError("All examples must have the same number of <eos> tokens.")
  1945. batch_size, _, hidden_size = sequence_output.shape
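# Pool by taking the decoder hidden state at the position of the last <eos> token of each sequence.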
  1946. sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
  1947. logits = self.classification_head(sentence_representation)
  1948. loss = None
  1949. if labels is not None:
  1950. labels = labels.to(logits.device)
  1951. if self.config.problem_type is None:
  1952. if self.config.num_labels == 1:
  1953. self.config.problem_type = "regression"
  1954. elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  1955. self.config.problem_type = "single_label_classification"
  1956. else:
  1957. self.config.problem_type = "multi_label_classification"
  1958. if self.config.problem_type == "regression":
  1959. loss_fct = MSELoss()
  1960. if self.config.num_labels == 1:
  1961. loss = loss_fct(logits.squeeze(), labels.squeeze())
  1962. else:
  1963. loss = loss_fct(logits, labels)
  1964. elif self.config.problem_type == "single_label_classification":
  1965. loss_fct = CrossEntropyLoss()
  1966. loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
  1967. elif self.config.problem_type == "multi_label_classification":
  1968. loss_fct = BCEWithLogitsLoss()
  1969. loss = loss_fct(logits, labels)
  1970. if not return_dict:
  1971. output = (logits,) + outputs[1:]
  1972. return ((loss,) + output) if loss is not None else output
  1973. return Seq2SeqSequenceClassifierOutput(
  1974. loss=loss,
  1975. logits=logits,
  1976. past_key_values=outputs.past_key_values,
  1977. decoder_hidden_states=outputs.decoder_hidden_states,
  1978. decoder_attentions=outputs.decoder_attentions,
  1979. cross_attentions=outputs.cross_attentions,
  1980. encoder_last_hidden_state=outputs.encoder_last_hidden_state,
  1981. encoder_hidden_states=outputs.encoder_hidden_states,
  1982. encoder_attentions=outputs.encoder_attentions,
  1983. )
  1984. @add_start_docstrings(
  1985. """
  1986. MT5 Encoder Model with a token classification head on top (a linear layer on top of the hidden-states output)
  1987. e.g. for Named-Entity-Recognition (NER) tasks.
  1988. """,
  1989. MT5_START_DOCSTRING,
  1990. )
  1991. class MT5ForTokenClassification(MT5PreTrainedModel):
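r"""
Examples (an illustrative sketch only: `google/mt5-small` is a pretraining checkpoint, so the token
classification head below is randomly initialized and `num_labels=3` is an arbitrary choice):
```python
>>> from transformers import AutoTokenizer, MT5ForTokenClassification
>>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
>>> model = MT5ForTokenClassification.from_pretrained("google/mt5-small", num_labels=3)
>>> inputs = tokenizer("Hugging Face is based in New York City.", return_tensors="pt")
>>> logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
```"""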
  1992. _tied_weights_keys = ["transformer.encoder.embed_tokens.weight"]
  1993. # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.__init__ with T5->MT5
  1994. def __init__(self, config: MT5Config):
  1995. super().__init__(config)
  1996. self.num_labels = config.num_labels
  1997. self.transformer = MT5EncoderModel(config)
  1998. self.dropout = nn.Dropout(config.classifier_dropout)
  1999. self.classifier = nn.Linear(config.hidden_size, config.num_labels)
  2000. # Initialize weights and apply final processing
  2001. self.post_init()
  2002. @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
  2003. @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
  2004. # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.forward with T5->MT5
  2005. def forward(
  2006. self,
  2007. input_ids: Optional[torch.Tensor] = None,
  2008. attention_mask: Optional[torch.Tensor] = None,
  2009. head_mask: Optional[torch.Tensor] = None,
  2010. inputs_embeds: Optional[torch.Tensor] = None,
  2011. labels: Optional[torch.Tensor] = None,
  2012. output_attentions: Optional[bool] = None,
  2013. output_hidden_states: Optional[bool] = None,
  2014. return_dict: Optional[bool] = None,
  2015. ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
  2016. r"""
  2017. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  2018. Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
  2019. Returns:
  2020. """
  2021. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  2022. outputs = self.transformer(
  2023. input_ids,
  2024. attention_mask=attention_mask,
  2025. head_mask=head_mask,
  2026. inputs_embeds=inputs_embeds,
  2027. output_attentions=output_attentions,
  2028. output_hidden_states=output_hidden_states,
  2029. return_dict=return_dict,
  2030. )
  2031. hidden_states = outputs[0]
  2032. hidden_states = self.dropout(hidden_states)
  2033. logits = self.classifier(hidden_states)
  2034. loss = None
  2035. if labels is not None:
  2036. loss_fct = CrossEntropyLoss()
  2037. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  2038. if not return_dict:
            output = (logits,) + outputs[2:-1]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
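
# Illustrative usage sketch for MT5ForTokenClassification: the head runs the encoder only and
# emits one logit vector of size num_labels per input token. The checkpoint name and num_labels
# below are assumptions for demonstration.
#
#     from transformers import AutoTokenizer, MT5ForTokenClassification
#     import torch
#
#     tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#     model = MT5ForTokenClassification.from_pretrained("google/mt5-small", num_labels=5)
#
#     inputs = tokenizer("Hugging Face is based in New York City", return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     predicted_label_ids = outputs.logits.argmax(dim=-1)  # shape (batch_size, sequence_length)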


@add_start_docstrings(
    """
    MT5 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers
    on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MT5_START_DOCSTRING,
)
class MT5ForQuestionAnswering(MT5PreTrainedModel):
    _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]

    # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.__init__ with T5->MT5
    def __init__(self, config: MT5Config):
        super().__init__(config)
        self.model_dim = config.d_model

        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = MT5Stack(encoder_config, self.shared)

        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = MT5Stack(decoder_config, self.shared)

        self.num_labels = config.num_labels
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

        self.model_parallel = False

    # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_input_embeddings
    def get_input_embeddings(self):
        return self.shared

    # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.set_input_embeddings
    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)

    # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_encoder
    def get_encoder(self):
        return self.encoder

    # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_decoder
    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.forward
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
            are not taken into account for computing the loss.
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if start_positions is not None and end_positions is not None:
            use_cache = False

        # Copied from models.bart.modeling_bart.BartModel.forward
        # different to other models, T5 automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            if input_ids is None:
                raise ValueError(
                    "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
                    "passed, `input_ids` cannot be `None`. Please pass either "
                    "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
                )
            decoder_input_ids = self._shift_right(input_ids)

        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask

        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        hidden_states = encoder_outputs[0]

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=None,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = decoder_outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs
            return ((total_loss,) + output) if total_loss is not None else output

        return Seq2SeqQuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
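
# Illustrative usage sketch for MT5ForQuestionAnswering: the question/context pair is encoded,
# `decoder_input_ids` are created from `input_ids` via `_shift_right` when none are passed, and
# the answer span is read off the start/end logits. The checkpoint name, question, and context
# below are assumptions for demonstration.
#
#     from transformers import AutoTokenizer, MT5ForQuestionAnswering
#     import torch
#
#     tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#     model = MT5ForQuestionAnswering.from_pretrained("google/mt5-small")
#
#     question, context = "Who wrote the report?", "The report was written by the UN panel."
#     inputs = tokenizer(question, context, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#
#     start = int(outputs.start_logits.argmax(dim=-1))
#     end = int(outputs.end_logits.argmax(dim=-1))
#     print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))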