modeling_albert.py

# coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model."""

import math
import os
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import (
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    is_torch_greater_or_equal_than_2_2,
    prune_linear_layer,
)
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_albert import AlbertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
_CONFIG_FOR_DOC = "AlbertConfig"
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        print(name)

    for name, array in zip(names, arrays):
        original_name = name

        # If saved from the TF HUB module
        name = name.replace("module/", "")

        # Renaming and simplifying
        name = name.replace("ffn_1", "ffn")
        name = name.replace("bert/", "albert/")
        name = name.replace("attention_1", "attention")
        name = name.replace("transform/", "")
        name = name.replace("LayerNorm_1", "full_layer_layer_norm")
        name = name.replace("LayerNorm", "attention/LayerNorm")
        name = name.replace("transformer/", "")

        # The feed forward layer had an 'intermediate' step which has been abstracted away
        name = name.replace("intermediate/dense/", "")
        name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")

        # ALBERT attention was split between self and output which have been abstracted away
        name = name.replace("/output/", "/")
        name = name.replace("/self/", "/")

        # The pooler is a linear layer
        name = name.replace("pooler/dense", "pooler")

        # The classifier was simplified to predictions from cls/predictions
        name = name.replace("cls/predictions", "predictions")
        name = name.replace("predictions/attention", "predictions")

        # Naming was changed to be more explicit
        name = name.replace("embeddings/attention", "embeddings")
        name = name.replace("inner_group_", "albert_layers/")
        name = name.replace("group_", "albert_layer_groups/")

        # Classifier
        if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
            name = "classifier/" + name

        # No ALBERT model currently handles the next sentence prediction task
        if "seq_relationship" in name:
            name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
            name = name.replace("weights", "weight")

        name = name.split("/")

        # Ignore the gradients applied by the LAMB/ADAM optimizers.
        if (
            "adam_m" in name
            or "adam_v" in name
            or "AdamWeightDecayOptimizer" in name
            or "AdamWeightDecayOptimizer_1" in name
            or "global_step" in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue

        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]

            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]

        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)

        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise

        print(f"Initialize PyTorch weight {name} from {original_name}")
        pointer.data = torch.from_numpy(array)

    return model
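

# The converter above is wired into `AlbertPreTrainedModel.load_tf_weights` below, so the usual entry
# point is `from_pretrained(..., from_tf=True)` rather than a direct call. A minimal sketch of a direct
# call (the checkpoint path here is a placeholder, not a real file):
#
#     config = AlbertConfig.from_pretrained("albert/albert-base-v2")
#     model = AlbertForPreTraining(config)
#     load_tf_weights_in_albert(model, config, "/path/to/albert/model.ckpt")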

class AlbertEmbeddings(nn.Module):
    """
    Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config: AlbertConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # When token_type_ids is not provided, fall back to the registered buffer (all zeros), which is what
        # auto-generation would produce. The registered buffer lets users trace the model without passing
        # token_type_ids and solves issue #5664.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

class AlbertAttention(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pruned_heads = set()

        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def prune_heads(self, heads: List[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.query = prune_linear_layer(self.query, index)
        self.key = prune_linear_layer(self.key, index)
        self.value = prune_linear_layer(self.value, index)
        self.dense = prune_linear_layer(self.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.num_attention_heads = self.num_attention_heads - len(heads)
        self.all_head_size = self.attention_head_size * self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the AlbertModel forward() function)
            attention_scores = attention_scores + attention_mask

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.attention_dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.transpose(2, 1).flatten(2)

        projected_context_layer = self.dense(context_layer)
        projected_context_layer_dropout = self.output_dropout(projected_context_layer)
        layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
        return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)

class AlbertSdpaAttention(AlbertAttention):
    def __init__(self, config):
        super().__init__(config)
        self.dropout_prob = config.attention_probs_dropout_prob
        self.require_contiguous_qkv = not is_torch_greater_or_equal_than_2_2

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        if self.position_embedding_type != "absolute" or output_attentions or head_mask is not None:
            logger.warning(
                "AlbertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
                "non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to "
                "the eager attention implementation, but specifying the eager implementation will be required from "
                "Transformers version v5.0.0 onwards. This warning can be removed using the argument "
                '`attn_implementation="eager"` when loading the model.'
            )
            return super().forward(hidden_states, attention_mask, head_mask, output_attentions)

        batch_size, seq_len, _ = hidden_states.size()
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
        # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
        # Reference: https://github.com/pytorch/pytorch/issues/112577
        if self.require_contiguous_qkv and query_layer.device.type == "cuda" and attention_mask is not None:
            query_layer = query_layer.contiguous()
            key_layer = key_layer.contiguous()
            value_layer = value_layer.contiguous()

        attention_output = torch.nn.functional.scaled_dot_product_attention(
            query=query_layer,
            key=key_layer,
            value=value_layer,
            attn_mask=attention_mask,
            dropout_p=self.dropout_prob if self.training else 0.0,
            is_causal=False,
        )

        attention_output = attention_output.transpose(1, 2)
        attention_output = attention_output.reshape(batch_size, seq_len, self.all_head_size)

        projected_context_layer = self.dense(attention_output)
        projected_context_layer_dropout = self.output_dropout(projected_context_layer)
        layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
        return (layernormed_context_layer,)


ALBERT_ATTENTION_CLASSES = {
    "eager": AlbertAttention,
    "sdpa": AlbertSdpaAttention,
}
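

# `config._attn_implementation` selects which entry of this mapping AlbertLayer instantiates;
# `from_pretrained` typically picks "sdpa" when the installed torch supports it and lets the user
# override the choice at load time. A minimal sketch (standard `transformers` loading API, same
# checkpoint as `_CHECKPOINT_FOR_DOC`):
#
#     model = AlbertModel.from_pretrained("albert/albert-base-v2", attn_implementation="eager")
#     attn = model.encoder.albert_layer_groups[0].albert_layers[0].attention
#     assert type(attn) is AlbertAttention  # would be AlbertSdpaAttention under the SDPA default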

class AlbertLayer(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()

        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = ALBERT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
        self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
        self.activation = ACT2FN[config.hidden_act]
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)

        ffn_output = apply_chunking_to_forward(
            self.ff_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output[0],
        )
        hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])

        return (hidden_states,) + attention_output[1:]  # add attentions if we output them

    def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor:
        ffn_output = self.ffn(attention_output)
        ffn_output = self.activation(ffn_output)
        ffn_output = self.ffn_output(ffn_output)
        return ffn_output

class AlbertLayerGroup(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()

        self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        layer_hidden_states = ()
        layer_attentions = ()

        for layer_index, albert_layer in enumerate(self.albert_layers):
            layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
            hidden_states = layer_output[0]

            if output_attentions:
                layer_attentions = layer_attentions + (layer_output[1],)

            if output_hidden_states:
                layer_hidden_states = layer_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if output_hidden_states:
            outputs = outputs + (layer_hidden_states,)
        if output_attentions:
            outputs = outputs + (layer_attentions,)
        return outputs  # last-layer hidden state, (layer hidden states), (layer attentions)

class AlbertTransformer(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()

        self.config = config
        self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
        self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[BaseModelOutput, Tuple]:
        hidden_states = self.embedding_hidden_mapping_in(hidden_states)

        all_hidden_states = (hidden_states,) if output_hidden_states else None
        all_attentions = () if output_attentions else None

        head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask

        for i in range(self.config.num_hidden_layers):
            # Number of layers in a hidden group
            layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)

            # Index of the hidden group
            group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))

            layer_group_output = self.albert_layer_groups[group_idx](
                hidden_states,
                attention_mask,
                head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
                output_attentions,
                output_hidden_states,
            )
            hidden_states = layer_group_output[0]

            if output_attentions:
                all_attentions = all_attentions + layer_group_output[-1]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
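

# Parameter-sharing note for the loop above: each of the `num_hidden_layers` iterations reuses the
# parameters of one of the `num_hidden_groups` AlbertLayerGroup modules. A small worked example
# (albert-base-v2 defaults, shown only for illustration):
#
#     num_hidden_layers = 12, num_hidden_groups = 1  ->  layers_per_group = 12, group_idx = 0 for all i,
#     so the single shared group is applied 12 times.
#
# With num_hidden_groups = 2, iterations 0-5 would use group 0 and iterations 6-11 group 1.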

class AlbertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = AlbertConfig
    load_tf_weights = load_tf_weights_in_albert
    base_model_prefix = "albert"
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

@dataclass
class AlbertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`AlbertForPreTraining`].

    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the sentence order prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the sentence order prediction (classification) head (scores of True/False
            continuation before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    sop_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

ALBERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Args:
        config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

ALBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
    ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
    config_class = AlbertConfig
    base_model_prefix = "albert"

    def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True):
        super().__init__(config)

        self.config = config
        self.embeddings = AlbertEmbeddings(config)
        self.encoder = AlbertTransformer(config)
        if add_pooling_layer:
            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
            self.pooler_activation = nn.Tanh()
        else:
            self.pooler = None
            self.pooler_activation = None

        self.attn_implementation = config._attn_implementation
        self.position_embedding_type = config.position_embedding_type

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value: nn.Embedding) -> None:
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. ALBERT
        has a different architecture in that its layers are shared across groups, which in turn contain inner groups.
        If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there are only 4 distinct
        sets of layer parameters.

        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden group,
        while [2,3] correspond to the two inner groups of the second hidden group.

        Any layer with an index other than [0,1,2,3] will result in an error. See the base class PreTrainedModel for
        more information about head pruning.
        """
        for layer, heads in heads_to_prune.items():
            group_idx = int(layer / self.config.inner_group_num)
            inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
            self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
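
    # Worked example of the mapping above, assuming the 12-layer / 2-group / 2-inner-group configuration
    # described in the docstring (so inner_group_num == 2; values are only for illustration):
    #
    #     flattened index 3 -> group_idx = 3 // 2 = 1, inner_group_idx = 3 - 1 * 2 = 1
    #
    # i.e. `model._prune_heads({3: [0, 1]})` prunes heads 0 and 1 of the second inner layer of the
    # second layer group.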

    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutputWithPooling, Tuple]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        use_sdpa_attention_mask = (
            self.attn_implementation == "sdpa"
            and self.position_embedding_type == "absolute"
            and head_mask is None
            and not output_attentions
        )

        if use_sdpa_attention_mask:
            extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                attention_mask, embedding_output.dtype, tgt_len=seq_length
            )
        else:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.encoder(
            embedding_output,
            extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]

        pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
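

# A minimal usage sketch for the bare encoder (mirrors the sample that `add_code_sample_docstrings`
# generates above; checkpoint is `_CHECKPOINT_FOR_DOC`):
#
#     >>> from transformers import AutoTokenizer, AlbertModel
#     >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
#     >>> model = AlbertModel.from_pretrained("albert/albert-base-v2")
#     >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#     >>> outputs = model(**inputs)
#     >>> last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)
#     >>> pooled_output = outputs.pooler_output          # (batch_size, hidden_size)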

@add_start_docstrings(
    """
    Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `sentence order prediction (classification)` head.
    """,
    ALBERT_START_DOCSTRING,
)
class AlbertForPreTraining(AlbertPreTrainedModel):
    _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]

    def __init__(self, config: AlbertConfig):
        super().__init__(config)

        self.albert = AlbertModel(config)
        self.predictions = AlbertMLMHead(config)
        self.sop_classifier = AlbertSOPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self) -> nn.Linear:
        return self.predictions.decoder

    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
        self.predictions.decoder = new_embeddings

    def get_input_embeddings(self) -> nn.Embedding:
        return self.albert.embeddings.word_embeddings

    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        sentence_order_label: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[AlbertForPreTrainingOutput, Tuple]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sentence order prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then
            sequence B), `1` indicates switched order (sequence B, then sequence A).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, AlbertForPreTraining
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
        >>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")

        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
        >>> # Batch size 1
        >>> outputs = model(input_ids)

        >>> prediction_logits = outputs.prediction_logits
        >>> sop_logits = outputs.sop_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.albert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]

        prediction_scores = self.predictions(sequence_output)
        sop_scores = self.sop_classifier(pooled_output)

        total_loss = None
        if labels is not None and sentence_order_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
            total_loss = masked_lm_loss + sentence_order_loss

        if not return_dict:
            output = (prediction_scores, sop_scores) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return AlbertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            sop_logits=sop_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

class AlbertMLMHead(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()

        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
        self.activation = ACT2FN[config.hidden_act]
        self.decoder.bias = self.bias

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.decoder(hidden_states)

        prediction_scores = hidden_states

        return prediction_scores

    def _tie_weights(self) -> None:
        # For accelerate compatibility and to not break backward compatibility
        if self.decoder.bias.device.type == "meta":
            self.decoder.bias = self.bias
        else:
            # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
            self.bias = self.decoder.bias


class AlbertSOPHead(nn.Module):
    def __init__(self, config: AlbertConfig):
        super().__init__()

        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
        dropout_pooled_output = self.dropout(pooled_output)
        logits = self.classifier(dropout_pooled_output)
        return logits

@add_start_docstrings(
    "Albert Model with a `language modeling` head on top.",
    ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.albert = AlbertModel(config, add_pooling_layer=False)
        self.predictions = AlbertMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self) -> nn.Linear:
        return self.predictions.decoder

    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
        self.predictions.decoder = new_embeddings
        self.predictions.bias = new_embeddings.bias

    def get_input_embeddings(self) -> nn.Embedding:
        return self.albert.embeddings.word_embeddings

    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, AlbertForMaskedLM

        >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
        >>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")

        >>> # add mask_token
        >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> # retrieve index of [MASK]
        >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
        >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
        >>> tokenizer.decode(predicted_token_id)
        'france'
        ```

        ```python
        >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
        >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)

        >>> outputs = model(**inputs, labels=labels)
        >>> round(outputs.loss.item(), 2)
        0.81
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_outputs = outputs[0]

        prediction_scores = self.predictions(sequence_outputs)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

@add_start_docstrings(
    """
    Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output), e.g. for GLUE tasks.
    """,
    ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.albert = AlbertModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="textattack/albert-base-v2-imdb",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="'LABEL_1'",
        expected_loss=0.12,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
  985. @add_start_docstrings(
  986. """
  987. Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
  988. Named-Entity-Recognition (NER) tasks.
  989. """,
  990. ALBERT_START_DOCSTRING,
  991. )
  992. class AlbertForTokenClassification(AlbertPreTrainedModel):
  993. def __init__(self, config: AlbertConfig):
  994. super().__init__(config)
  995. self.num_labels = config.num_labels
  996. self.albert = AlbertModel(config, add_pooling_layer=False)
  997. classifier_dropout_prob = (
  998. config.classifier_dropout_prob
  999. if config.classifier_dropout_prob is not None
  1000. else config.hidden_dropout_prob
  1001. )
  1002. self.dropout = nn.Dropout(classifier_dropout_prob)
  1003. self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
  1004. # Initialize weights and apply final processing
  1005. self.post_init()
    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, Tuple]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.albert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
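# --- Usage sketch (editor's addition, not part of the upstream module) -------------
# A minimal example of `AlbertForTokenClassification` for per-token tagging such as
# NER. The checkpoint name "albert-base-v2" and `num_labels=5` are assumptions for
# illustration; the classification head is randomly initialized until fine-tuned.
def _example_token_classification():  # pragma: no cover - illustrative sketch only
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
    model = AlbertForTokenClassification.from_pretrained("albert-base-v2", num_labels=5)
    inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # One label per token: logits has shape (batch_size, sequence_length, num_labels).
    predicted_ids = logits.argmax(dim=-1)
    return predicted_ids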
@add_start_docstrings(
    """
    Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.albert = AlbertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="twmkn9/albert-base-v2-squad2",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        qa_target_start_index=12,
        qa_target_end_index=13,
        expected_output="'a nice puppet'",
        expected_loss=7.36,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[QuestionAnsweringModelOutput, Tuple]:
  1096. r"""
  1097. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1098. Labels for position (index) of the start of the labelled span for computing the token classification loss.
  1099. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
  1100. are not taken into account for computing the loss.
  1101. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1102. Labels for position (index) of the end of the labelled span for computing the token classification loss.
  1103. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
  1104. are not taken into account for computing the loss.
  1105. """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits: torch.Tensor = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the start/end positions may carry an extra dimension; squeeze it away.
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the model inputs are clamped to `ignored_index` and excluded from the loss.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
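# --- Usage sketch (editor's addition, not part of the upstream module) -------------
# A minimal example of extractive QA with `AlbertForQuestionAnswering`, decoding the
# most likely answer span from the start/end logits. The checkpoint
# "twmkn9/albert-base-v2-squad2" is the one referenced in the docstring sample above;
# the question/context pair is made up for illustration.
def _example_question_answering():  # pragma: no cover - illustrative sketch only
    import torch
    from transformers import AutoTokenizer

    checkpoint = "twmkn9/albert-base-v2-squad2"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AlbertForQuestionAnswering.from_pretrained(checkpoint)
    question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        out = model(**inputs)
    # Pick the highest-scoring start and end indices and decode the tokens between them.
    start = int(out.start_logits.argmax())
    end = int(out.end_logits.argmax())
    return tokenizer.decode(inputs["input_ids"][0, start : end + 1])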
@add_start_docstrings(
    """
    Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ALBERT_START_DOCSTRING,
)
class AlbertForMultipleChoice(AlbertPreTrainedModel):
    def __init__(self, config: AlbertConfig):
        super().__init__(config)

        self.albert = AlbertModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MultipleChoiceModelOutput, Tuple]:
  1182. r"""
  1183. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1184. Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
  1185. num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
  1186. *input_ids* above)
  1187. """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten the (batch_size, num_choices, ...) inputs into (batch_size * num_choices, ...)
        # so each choice is encoded independently by the shared backbone.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.albert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits: torch.Tensor = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
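# --- Usage sketch (editor's addition, not part of the upstream module) -------------
# A minimal example of `AlbertForMultipleChoice`. Each example is tokenized once per
# candidate choice and stacked into a (batch_size, num_choices, sequence_length)
# tensor, which the forward pass above flattens before encoding. The checkpoint
# "albert-base-v2" and the prompt/choices are assumptions for illustration; the
# choice-scoring head is randomly initialized until fine-tuned.
def _example_multiple_choice():  # pragma: no cover - illustrative sketch only
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
    model = AlbertForMultipleChoice.from_pretrained("albert-base-v2")
    prompt = "The cat sat on the"
    choices = ["mat.", "moon."]
    enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
    # Add the num_choices dimension: (1, num_choices, sequence_length).
    inputs = {k: v.unsqueeze(0) for k, v in enc.items()}
    with torch.no_grad():
        out = model(**inputs, labels=torch.tensor([0]))
    # out.logits has shape (batch_size, num_choices); the highest logit is the predicted choice.
    return out.loss, out.logits.argmax(dim=-1)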
__all__ = [
    "load_tf_weights_in_albert",
    "AlbertPreTrainedModel",
    "AlbertModel",
    "AlbertForPreTraining",
    "AlbertForMaskedLM",
    "AlbertForSequenceClassification",
    "AlbertForTokenClassification",
    "AlbertForQuestionAnswering",
    "AlbertForMultipleChoice",
]