# modeling_distilbert.py
  1. # coding=utf-8
  2. # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """
  16. PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
  17. part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
  18. """
  19. import math
  20. from typing import Dict, List, Optional, Set, Tuple, Union
  21. import numpy as np
  22. import torch
  23. from torch import nn
  24. from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
  25. from ...activations import get_activation
  26. from ...configuration_utils import PretrainedConfig
  27. from ...integrations.deepspeed import is_deepspeed_zero3_enabled
  28. from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa
  29. from ...modeling_outputs import (
  30. BaseModelOutput,
  31. MaskedLMOutput,
  32. MultipleChoiceModelOutput,
  33. QuestionAnsweringModelOutput,
  34. SequenceClassifierOutput,
  35. TokenClassifierOutput,
  36. )
  37. from ...modeling_utils import PreTrainedModel
  38. from ...pytorch_utils import (
  39. apply_chunking_to_forward,
  40. find_pruneable_heads_and_indices,
  41. is_torch_greater_or_equal_than_2_2,
  42. prune_linear_layer,
  43. )
  44. from ...utils import (
  45. add_code_sample_docstrings,
  46. add_start_docstrings,
  47. add_start_docstrings_to_model_forward,
  48. is_flash_attn_2_available,
  49. is_flash_attn_greater_or_equal_2_10,
  50. logging,
  51. replace_return_docstrings,
  52. )
  53. from .configuration_distilbert import DistilBertConfig
  54. if is_flash_attn_2_available():
  55. from ...modeling_flash_attention_utils import _flash_attention_forward
  56. logger = logging.get_logger(__name__)
  57. _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
  58. _CONFIG_FOR_DOC = "DistilBertConfig"
  59. # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
  60. def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
  61. if is_deepspeed_zero3_enabled():
  62. import deepspeed
  63. with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
  64. if torch.distributed.get_rank() == 0:
  65. _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
  66. else:
  67. _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
  68. def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
  69. position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
  70. out.requires_grad = False
  71. out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
  72. out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
  73. out.detach_()
  74. class Embeddings(nn.Module):
  75. def __init__(self, config: PretrainedConfig):
  76. super().__init__()
  77. self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
  78. self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
  79. self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
  80. self.dropout = nn.Dropout(config.dropout)
  81. self.register_buffer(
  82. "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
  83. )
  84. def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
  85. """
  86. Parameters:
  87. input_ids (torch.Tensor):
  88. torch.tensor(bs, max_seq_length) The token ids to embed.
  89. input_embeds (*optional*, torch.Tensor):
  90. The pre-computed word embeddings. Can only be passed if the input ids are `None`.
  91. Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
  92. embeddings)
  93. """
  94. if input_ids is not None:
  95. input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
  96. seq_length = input_embeds.size(1)
  97. # Setting the position-ids to the registered buffer in constructor, it helps
  98. # when tracing the model without passing position-ids, solves
  99. # isues similar to issue #5664
  100. if hasattr(self, "position_ids"):
  101. position_ids = self.position_ids[:, :seq_length]
  102. else:
  103. position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
  104. position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
  105. position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
  106. embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
  107. embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
  108. embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
  109. return embeddings
  110. class MultiHeadSelfAttention(nn.Module):
  111. def __init__(self, config: PretrainedConfig):
  112. super().__init__()
  113. self.config = config
  114. self.n_heads = config.n_heads
  115. self.dim = config.dim
  116. self.dropout = nn.Dropout(p=config.attention_dropout)
  117. self.is_causal = False
  118. # Have an even number of multi heads that divide the dimensions
  119. if self.dim % self.n_heads != 0:
  120. # Raise value errors for even multi-head attention nodes
  121. raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")
  122. self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
  123. self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
  124. self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
  125. self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
  126. self.pruned_heads: Set[int] = set()
  127. self.attention_head_size = self.dim // self.n_heads
  128. def prune_heads(self, heads: List[int]):
  129. if len(heads) == 0:
  130. return
  131. heads, index = find_pruneable_heads_and_indices(
  132. heads, self.n_heads, self.attention_head_size, self.pruned_heads
  133. )
  134. # Prune linear layers
  135. self.q_lin = prune_linear_layer(self.q_lin, index)
  136. self.k_lin = prune_linear_layer(self.k_lin, index)
  137. self.v_lin = prune_linear_layer(self.v_lin, index)
  138. self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
  139. # Update hyper params
  140. self.n_heads = self.n_heads - len(heads)
  141. self.dim = self.attention_head_size * self.n_heads
  142. self.pruned_heads = self.pruned_heads.union(heads)
  143. def forward(
  144. self,
  145. query: torch.Tensor,
  146. key: torch.Tensor,
  147. value: torch.Tensor,
  148. mask: torch.Tensor,
  149. head_mask: Optional[torch.Tensor] = None,
  150. output_attentions: bool = False,
  151. ) -> Tuple[torch.Tensor, ...]:
  152. """
  153. Parameters:
  154. query: torch.tensor(bs, seq_length, dim)
  155. key: torch.tensor(bs, seq_length, dim)
  156. value: torch.tensor(bs, seq_length, dim)
  157. mask: torch.tensor(bs, seq_length)
  158. Returns:
  159. weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
  160. seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
  161. """
  162. bs, q_length, dim = query.size()
  163. k_length = key.size(1)
  164. # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
  165. # assert key.size() == value.size()
  166. dim_per_head = self.dim // self.n_heads
  167. mask_reshp = (bs, 1, 1, k_length)
  168. def shape(x: torch.Tensor) -> torch.Tensor:
  169. """separate heads"""
  170. return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
  171. def unshape(x: torch.Tensor) -> torch.Tensor:
  172. """group heads"""
  173. return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
  174. q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
  175. k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
  176. v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
  177. q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
  178. scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
  179. mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
  180. scores = scores.masked_fill(
  181. mask, torch.tensor(torch.finfo(scores.dtype).min)
  182. ) # (bs, n_heads, q_length, k_length)
  183. weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length)
  184. weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
  185. # Mask heads if we want to
  186. if head_mask is not None:
  187. weights = weights * head_mask
  188. context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
  189. context = unshape(context) # (bs, q_length, dim)
  190. context = self.out_lin(context) # (bs, q_length, dim)
  191. if output_attentions:
  192. return (context, weights)
  193. else:
  194. return (context,)
class DistilBertFlashAttention2(MultiHeadSelfAttention):
    """
    DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
    stays untouched. The only required change would be on the forward pass where it needs to correctly call the public
    API of flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length)

        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`

        NOTE(review): `head_mask` is accepted for interface compatibility but is not applied
        by the flash-attention path in this implementation.
        """
        batch_size, q_length, dim = query.size()

        dim_per_head = self.dim // self.n_heads

        def reshape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(batch_size, -1, self.n_heads, dim_per_head)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        query_states = reshape(self.q_lin(query))
        key_states = reshape(self.k_lin(key))
        value_states = reshape(self.v_lin(value))

        # Attention dropout is only active at training time.
        attn_dropout = self.config.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)
        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_lin.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_weights = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            mask,
            q_length,
            dropout=attn_dropout,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
            is_causal=self.is_causal,
        )

        # Merge heads back to (bs, q_length, n_heads * dim_per_head), then project out.
        attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
        attn_output = self.out_lin(attn_weights_reshaped)

        if output_attentions:
            return (attn_output, attn_weights)
        else:
            return (attn_output,)
  275. class DistilBertSdpaAttention(MultiHeadSelfAttention):
  276. def __init__(self, config: PretrainedConfig):
  277. super().__init__(config=config)
  278. self.dropout_prob = config.attention_dropout
  279. self.require_contiguous_qkv = not is_torch_greater_or_equal_than_2_2
  280. def forward(
  281. self,
  282. query: torch.Tensor,
  283. key: torch.Tensor,
  284. value: torch.Tensor,
  285. mask: torch.Tensor,
  286. head_mask: Optional[torch.Tensor] = None,
  287. output_attentions: bool = False,
  288. ) -> Tuple[torch.Tensor, ...]:
  289. """
  290. Parameters:
  291. query: torch.tensor(bs, seq_length, dim)
  292. key: torch.tensor(bs, seq_length, dim)
  293. value: torch.tensor(bs, seq_length, dim)
  294. mask: torch.tensor(bs, seq_length)
  295. Returns:
  296. weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
  297. seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
  298. """
  299. if output_attentions or head_mask is not None:
  300. logger.warning_once(
  301. "DistilBertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support"
  302. " `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying"
  303. " the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be"
  304. ' removed using the argument `attn_implementation="eager"` when loading the model.'
  305. )
  306. return super().forward(
  307. query,
  308. key,
  309. value,
  310. mask,
  311. head_mask,
  312. output_attentions,
  313. )
  314. batch_size, _, _ = query.size()
  315. dim_per_head = self.dim // self.n_heads
  316. def shape(x: torch.Tensor) -> torch.Tensor:
  317. """separate heads"""
  318. return x.view(batch_size, -1, self.n_heads, dim_per_head).transpose(1, 2)
  319. def unshape(x: torch.Tensor) -> torch.Tensor:
  320. """group heads"""
  321. return x.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * dim_per_head)
  322. q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
  323. k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
  324. v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
  325. # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
  326. # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
  327. # Reference: https://github.com/pytorch/pytorch/issues/112577
  328. if self.require_contiguous_qkv and q.device.type == "cuda" and mask is not None:
  329. q = q.contiguous()
  330. k = k.contiguous()
  331. v = v.contiguous()
  332. attn_output = torch.nn.functional.scaled_dot_product_attention(
  333. q,
  334. k,
  335. v,
  336. attn_mask=mask,
  337. dropout_p=self.dropout_prob if self.training else 0.0,
  338. is_causal=False,
  339. )
  340. attn_output = unshape(attn_output)
  341. attn_output = self.out_lin(attn_output)
  342. return (attn_output,)
  343. class FFN(nn.Module):
  344. def __init__(self, config: PretrainedConfig):
  345. super().__init__()
  346. self.dropout = nn.Dropout(p=config.dropout)
  347. self.chunk_size_feed_forward = config.chunk_size_feed_forward
  348. self.seq_len_dim = 1
  349. self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
  350. self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
  351. self.activation = get_activation(config.activation)
  352. def forward(self, input: torch.Tensor) -> torch.Tensor:
  353. return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
  354. def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
  355. x = self.lin1(input)
  356. x = self.activation(x)
  357. x = self.lin2(x)
  358. x = self.dropout(x)
  359. return x
# Maps `config._attn_implementation` to the attention module TransformerBlock instantiates.
DISTILBERT_ATTENTION_CLASSES = {
    "eager": MultiHeadSelfAttention,
    "flash_attention_2": DistilBertFlashAttention2,
    "sdpa": DistilBertSdpaAttention,
}
  365. class TransformerBlock(nn.Module):
  366. def __init__(self, config: PretrainedConfig):
  367. super().__init__()
  368. # Have an even number of Configure multi-heads
  369. if config.dim % config.n_heads != 0:
  370. raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
  371. self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
  372. self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
  373. self.ffn = FFN(config)
  374. self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
  375. def forward(
  376. self,
  377. x: torch.Tensor,
  378. attn_mask: Optional[torch.Tensor] = None,
  379. head_mask: Optional[torch.Tensor] = None,
  380. output_attentions: bool = False,
  381. ) -> Tuple[torch.Tensor, ...]:
  382. """
  383. Parameters:
  384. x: torch.tensor(bs, seq_length, dim)
  385. attn_mask: torch.tensor(bs, seq_length)
  386. Returns:
  387. sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
  388. torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
  389. """
  390. # Self-Attention
  391. sa_output = self.attention(
  392. query=x,
  393. key=x,
  394. value=x,
  395. mask=attn_mask,
  396. head_mask=head_mask,
  397. output_attentions=output_attentions,
  398. )
  399. if output_attentions:
  400. sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
  401. else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
  402. if type(sa_output) is not tuple:
  403. raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
  404. sa_output = sa_output[0]
  405. sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
  406. # Feed Forward Network
  407. ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
  408. ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
  409. output = (ffn_output,)
  410. if output_attentions:
  411. output = (sa_weights,) + output
  412. return output
  413. class Transformer(nn.Module):
  414. def __init__(self, config: PretrainedConfig):
  415. super().__init__()
  416. self.n_layers = config.n_layers
  417. self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
  418. self.gradient_checkpointing = False
  419. def forward(
  420. self,
  421. x: torch.Tensor,
  422. attn_mask: Optional[torch.Tensor] = None,
  423. head_mask: Optional[torch.Tensor] = None,
  424. output_attentions: bool = False,
  425. output_hidden_states: bool = False,
  426. return_dict: Optional[bool] = None,
  427. ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore
  428. """
  429. Parameters:
  430. x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
  431. attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
  432. Returns:
  433. hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top)
  434. layer all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
  435. Tuple of length n_layers with the hidden states from each layer.
  436. Optional: only if output_hidden_states=True
  437. all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
  438. Tuple of length n_layers with the attention weights from each layer
  439. Optional: only if output_attentions=True
  440. """
  441. all_hidden_states = () if output_hidden_states else None
  442. all_attentions = () if output_attentions else None
  443. hidden_state = x
  444. for i, layer_module in enumerate(self.layer):
  445. if output_hidden_states:
  446. all_hidden_states = all_hidden_states + (hidden_state,)
  447. if self.gradient_checkpointing and self.training:
  448. layer_outputs = self._gradient_checkpointing_func(
  449. layer_module.__call__,
  450. hidden_state,
  451. attn_mask,
  452. head_mask[i],
  453. output_attentions,
  454. )
  455. else:
  456. layer_outputs = layer_module(
  457. hidden_state,
  458. attn_mask,
  459. head_mask[i],
  460. output_attentions,
  461. )
  462. hidden_state = layer_outputs[-1]
  463. if output_attentions:
  464. if len(layer_outputs) != 2:
  465. raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
  466. attentions = layer_outputs[0]
  467. all_attentions = all_attentions + (attentions,)
  468. else:
  469. if len(layer_outputs) != 1:
  470. raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")
  471. # Add last layer
  472. if output_hidden_states:
  473. all_hidden_states = all_hidden_states + (hidden_state,)
  474. if not return_dict:
  475. return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
  476. return BaseModelOutput(
  477. last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
  478. )
  479. # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
  480. class DistilBertPreTrainedModel(PreTrainedModel):
  481. """
  482. An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
  483. models.
  484. """
  485. config_class = DistilBertConfig
  486. load_tf_weights = None
  487. base_model_prefix = "distilbert"
  488. supports_gradient_checkpointing = True
  489. _supports_flash_attn_2 = True
  490. _supports_sdpa = True
  491. def _init_weights(self, module: nn.Module):
  492. """Initialize the weights."""
  493. if isinstance(module, nn.Linear):
  494. # Slightly different from the TF version which uses truncated_normal for initialization
  495. # cf https://github.com/pytorch/pytorch/pull/5617
  496. module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
  497. if module.bias is not None:
  498. module.bias.data.zero_()
  499. elif isinstance(module, nn.Embedding):
  500. module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
  501. if module.padding_idx is not None:
  502. module.weight.data[module.padding_idx].zero_()
  503. elif isinstance(module, nn.LayerNorm):
  504. module.bias.data.zero_()
  505. module.weight.data.fill_(1.0)
  506. elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
  507. create_sinusoidal_embeddings(
  508. self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight
  509. )
# Class-level docstring shared by all DistilBERT model heads via `add_start_docstrings`.
DISTILBERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Forward-pass docstring template, formatted with the input shape via
# `add_start_docstrings_to_model_forward`.
DISTILBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
  551. @add_start_docstrings(
  552. "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
  553. DISTILBERT_START_DOCSTRING,
  554. )
  555. class DistilBertModel(DistilBertPreTrainedModel):
  556. def __init__(self, config: PretrainedConfig):
  557. super().__init__(config)
  558. self.embeddings = Embeddings(config) # Embeddings
  559. self.transformer = Transformer(config) # Encoder
  560. self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
  561. self._use_sdpa = config._attn_implementation == "sdpa"
  562. # Initialize weights and apply final processing
  563. self.post_init()
  564. def get_position_embeddings(self) -> nn.Embedding:
  565. """
  566. Returns the position embeddings
  567. """
  568. return self.embeddings.position_embeddings
  569. def resize_position_embeddings(self, new_num_position_embeddings: int):
  570. """
  571. Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
  572. Arguments:
  573. new_num_position_embeddings (`int`):
  574. The number of new position embedding matrix. If position embeddings are learned, increasing the size
  575. will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
  576. end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
  577. size will add correct vectors at the end following the position encoding algorithm, whereas reducing
  578. the size will remove vectors from the end.
  579. """
  580. num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
  581. # no resizing needs to be done if the length stays the same
  582. if num_position_embeds_diff == 0:
  583. return
  584. logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
  585. self.config.max_position_embeddings = new_num_position_embeddings
  586. old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
  587. self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
  588. if self.config.sinusoidal_pos_embds:
  589. create_sinusoidal_embeddings(
  590. n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.position_embeddings.weight
  591. )
  592. else:
  593. with torch.no_grad():
  594. if num_position_embeds_diff > 0:
  595. self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
  596. old_position_embeddings_weight
  597. )
  598. else:
  599. self.embeddings.position_embeddings.weight = nn.Parameter(
  600. old_position_embeddings_weight[:num_position_embeds_diff]
  601. )
  602. # move position_embeddings to correct device
  603. self.embeddings.position_embeddings.to(self.device)
  604. def get_input_embeddings(self) -> nn.Embedding:
  605. return self.embeddings.word_embeddings
  606. def set_input_embeddings(self, new_embeddings: nn.Embedding):
  607. self.embeddings.word_embeddings = new_embeddings
  608. def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
  609. """
  610. Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
  611. class PreTrainedModel
  612. """
  613. for layer, heads in heads_to_prune.items():
  614. self.transformer.layer[layer].attention.prune_heads(heads)
  615. @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
  616. @add_code_sample_docstrings(
  617. checkpoint=_CHECKPOINT_FOR_DOC,
  618. output_type=BaseModelOutput,
  619. config_class=_CONFIG_FOR_DOC,
  620. )
  621. def forward(
  622. self,
  623. input_ids: Optional[torch.Tensor] = None,
  624. attention_mask: Optional[torch.Tensor] = None,
  625. head_mask: Optional[torch.Tensor] = None,
  626. inputs_embeds: Optional[torch.Tensor] = None,
  627. output_attentions: Optional[bool] = None,
  628. output_hidden_states: Optional[bool] = None,
  629. return_dict: Optional[bool] = None,
  630. ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
  631. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  632. output_hidden_states = (
  633. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  634. )
  635. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  636. if input_ids is not None and inputs_embeds is not None:
  637. raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
  638. elif input_ids is not None:
  639. self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
  640. input_shape = input_ids.size()
  641. elif inputs_embeds is not None:
  642. input_shape = inputs_embeds.size()[:-1]
  643. else:
  644. raise ValueError("You have to specify either input_ids or inputs_embeds")
  645. device = input_ids.device if input_ids is not None else inputs_embeds.device
  646. head_mask_is_none = head_mask is None
  647. # Prepare head mask if needed
  648. head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
  649. embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim)
  650. if self._use_flash_attention_2:
  651. attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
  652. else:
  653. if attention_mask is None:
  654. attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
  655. if self._use_sdpa and head_mask_is_none and not output_attentions:
  656. attention_mask = _prepare_4d_attention_mask_for_sdpa(
  657. attention_mask, embeddings.dtype, tgt_len=input_shape[1]
  658. )
  659. return self.transformer(
  660. x=embeddings,
  661. attn_mask=attention_mask,
  662. head_mask=head_mask,
  663. output_attentions=output_attentions,
  664. output_hidden_states=output_hidden_states,
  665. return_dict=return_dict,
  666. )
  667. @add_start_docstrings(
  668. """DistilBert Model with a `masked language modeling` head on top.""",
  669. DISTILBERT_START_DOCSTRING,
  670. )
  671. class DistilBertForMaskedLM(DistilBertPreTrainedModel):
  672. _tied_weights_keys = ["vocab_projector.weight"]
  673. def __init__(self, config: PretrainedConfig):
  674. super().__init__(config)
  675. self.activation = get_activation(config.activation)
  676. self.distilbert = DistilBertModel(config)
  677. self.vocab_transform = nn.Linear(config.dim, config.dim)
  678. self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
  679. self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
  680. # Initialize weights and apply final processing
  681. self.post_init()
  682. self.mlm_loss_fct = nn.CrossEntropyLoss()
  683. def get_position_embeddings(self) -> nn.Embedding:
  684. """
  685. Returns the position embeddings
  686. """
  687. return self.distilbert.get_position_embeddings()
  688. def resize_position_embeddings(self, new_num_position_embeddings: int):
  689. """
  690. Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
  691. Arguments:
  692. new_num_position_embeddings (`int`):
  693. The number of new position embedding matrix. If position embeddings are learned, increasing the size
  694. will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
  695. end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
  696. size will add correct vectors at the end following the position encoding algorithm, whereas reducing
  697. the size will remove vectors from the end.
  698. """
  699. self.distilbert.resize_position_embeddings(new_num_position_embeddings)
  700. def get_output_embeddings(self) -> nn.Module:
  701. return self.vocab_projector
  702. def set_output_embeddings(self, new_embeddings: nn.Module):
  703. self.vocab_projector = new_embeddings
  704. @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
  705. @add_code_sample_docstrings(
  706. checkpoint=_CHECKPOINT_FOR_DOC,
  707. output_type=MaskedLMOutput,
  708. config_class=_CONFIG_FOR_DOC,
  709. )
  710. def forward(
  711. self,
  712. input_ids: Optional[torch.Tensor] = None,
  713. attention_mask: Optional[torch.Tensor] = None,
  714. head_mask: Optional[torch.Tensor] = None,
  715. inputs_embeds: Optional[torch.Tensor] = None,
  716. labels: Optional[torch.LongTensor] = None,
  717. output_attentions: Optional[bool] = None,
  718. output_hidden_states: Optional[bool] = None,
  719. return_dict: Optional[bool] = None,
  720. ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
  721. r"""
  722. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  723. Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
  724. config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
  725. loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
  726. """
  727. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  728. dlbrt_output = self.distilbert(
  729. input_ids=input_ids,
  730. attention_mask=attention_mask,
  731. head_mask=head_mask,
  732. inputs_embeds=inputs_embeds,
  733. output_attentions=output_attentions,
  734. output_hidden_states=output_hidden_states,
  735. return_dict=return_dict,
  736. )
  737. hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
  738. prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
  739. prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim)
  740. prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
  741. prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
  742. mlm_loss = None
  743. if labels is not None:
  744. mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
  745. if not return_dict:
  746. output = (prediction_logits,) + dlbrt_output[1:]
  747. return ((mlm_loss,) + output) if mlm_loss is not None else output
  748. return MaskedLMOutput(
  749. loss=mlm_loss,
  750. logits=prediction_logits,
  751. hidden_states=dlbrt_output.hidden_states,
  752. attentions=dlbrt_output.attentions,
  753. )
  754. @add_start_docstrings(
  755. """
  756. DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
  757. pooled output) e.g. for GLUE tasks.
  758. """,
  759. DISTILBERT_START_DOCSTRING,
  760. )
  761. class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
  762. def __init__(self, config: PretrainedConfig):
  763. super().__init__(config)
  764. self.num_labels = config.num_labels
  765. self.config = config
  766. self.distilbert = DistilBertModel(config)
  767. self.pre_classifier = nn.Linear(config.dim, config.dim)
  768. self.classifier = nn.Linear(config.dim, config.num_labels)
  769. self.dropout = nn.Dropout(config.seq_classif_dropout)
  770. # Initialize weights and apply final processing
  771. self.post_init()
  772. def get_position_embeddings(self) -> nn.Embedding:
  773. """
  774. Returns the position embeddings
  775. """
  776. return self.distilbert.get_position_embeddings()
  777. def resize_position_embeddings(self, new_num_position_embeddings: int):
  778. """
  779. Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
  780. Arguments:
  781. new_num_position_embeddings (`int`):
  782. The number of new position embedding matrix. If position embeddings are learned, increasing the size
  783. will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
  784. end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
  785. size will add correct vectors at the end following the position encoding algorithm, whereas reducing
  786. the size will remove vectors from the end.
  787. """
  788. self.distilbert.resize_position_embeddings(new_num_position_embeddings)
  789. @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  790. @add_code_sample_docstrings(
  791. checkpoint=_CHECKPOINT_FOR_DOC,
  792. output_type=SequenceClassifierOutput,
  793. config_class=_CONFIG_FOR_DOC,
  794. )
  795. def forward(
  796. self,
  797. input_ids: Optional[torch.Tensor] = None,
  798. attention_mask: Optional[torch.Tensor] = None,
  799. head_mask: Optional[torch.Tensor] = None,
  800. inputs_embeds: Optional[torch.Tensor] = None,
  801. labels: Optional[torch.LongTensor] = None,
  802. output_attentions: Optional[bool] = None,
  803. output_hidden_states: Optional[bool] = None,
  804. return_dict: Optional[bool] = None,
  805. ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
  806. r"""
  807. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  808. Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
  809. config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
  810. `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  811. """
  812. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  813. distilbert_output = self.distilbert(
  814. input_ids=input_ids,
  815. attention_mask=attention_mask,
  816. head_mask=head_mask,
  817. inputs_embeds=inputs_embeds,
  818. output_attentions=output_attentions,
  819. output_hidden_states=output_hidden_states,
  820. return_dict=return_dict,
  821. )
  822. hidden_state = distilbert_output[0] # (bs, seq_len, dim)
  823. pooled_output = hidden_state[:, 0] # (bs, dim)
  824. pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
  825. pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
  826. pooled_output = self.dropout(pooled_output) # (bs, dim)
  827. logits = self.classifier(pooled_output) # (bs, num_labels)
  828. loss = None
  829. if labels is not None:
  830. if self.config.problem_type is None:
  831. if self.num_labels == 1:
  832. self.config.problem_type = "regression"
  833. elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  834. self.config.problem_type = "single_label_classification"
  835. else:
  836. self.config.problem_type = "multi_label_classification"
  837. if self.config.problem_type == "regression":
  838. loss_fct = MSELoss()
  839. if self.num_labels == 1:
  840. loss = loss_fct(logits.squeeze(), labels.squeeze())
  841. else:
  842. loss = loss_fct(logits, labels)
  843. elif self.config.problem_type == "single_label_classification":
  844. loss_fct = CrossEntropyLoss()
  845. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  846. elif self.config.problem_type == "multi_label_classification":
  847. loss_fct = BCEWithLogitsLoss()
  848. loss = loss_fct(logits, labels)
  849. if not return_dict:
  850. output = (logits,) + distilbert_output[1:]
  851. return ((loss,) + output) if loss is not None else output
  852. return SequenceClassifierOutput(
  853. loss=loss,
  854. logits=logits,
  855. hidden_states=distilbert_output.hidden_states,
  856. attentions=distilbert_output.attentions,
  857. )
  858. @add_start_docstrings(
  859. """
  860. DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
  861. linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
  862. """,
  863. DISTILBERT_START_DOCSTRING,
  864. )
  865. class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
  866. def __init__(self, config: PretrainedConfig):
  867. super().__init__(config)
  868. self.distilbert = DistilBertModel(config)
  869. self.qa_outputs = nn.Linear(config.dim, config.num_labels)
  870. if config.num_labels != 2:
  871. raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
  872. self.dropout = nn.Dropout(config.qa_dropout)
  873. # Initialize weights and apply final processing
  874. self.post_init()
  875. def get_position_embeddings(self) -> nn.Embedding:
  876. """
  877. Returns the position embeddings
  878. """
  879. return self.distilbert.get_position_embeddings()
  880. def resize_position_embeddings(self, new_num_position_embeddings: int):
  881. """
  882. Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
  883. Arguments:
  884. new_num_position_embeddings (`int`):
  885. The number of new position embedding matrix. If position embeddings are learned, increasing the size
  886. will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
  887. end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
  888. size will add correct vectors at the end following the position encoding algorithm, whereas reducing
  889. the size will remove vectors from the end.
  890. """
  891. self.distilbert.resize_position_embeddings(new_num_position_embeddings)
  892. @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
  893. @add_code_sample_docstrings(
  894. checkpoint=_CHECKPOINT_FOR_DOC,
  895. output_type=QuestionAnsweringModelOutput,
  896. config_class=_CONFIG_FOR_DOC,
  897. )
  898. def forward(
  899. self,
  900. input_ids: Optional[torch.Tensor] = None,
  901. attention_mask: Optional[torch.Tensor] = None,
  902. head_mask: Optional[torch.Tensor] = None,
  903. inputs_embeds: Optional[torch.Tensor] = None,
  904. start_positions: Optional[torch.Tensor] = None,
  905. end_positions: Optional[torch.Tensor] = None,
  906. output_attentions: Optional[bool] = None,
  907. output_hidden_states: Optional[bool] = None,
  908. return_dict: Optional[bool] = None,
  909. ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
  910. r"""
  911. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  912. Labels for position (index) of the start of the labelled span for computing the token classification loss.
  913. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
  914. are not taken into account for computing the loss.
  915. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  916. Labels for position (index) of the end of the labelled span for computing the token classification loss.
  917. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
  918. are not taken into account for computing the loss.
  919. """
  920. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  921. distilbert_output = self.distilbert(
  922. input_ids=input_ids,
  923. attention_mask=attention_mask,
  924. head_mask=head_mask,
  925. inputs_embeds=inputs_embeds,
  926. output_attentions=output_attentions,
  927. output_hidden_states=output_hidden_states,
  928. return_dict=return_dict,
  929. )
  930. hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
  931. hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
  932. logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
  933. start_logits, end_logits = logits.split(1, dim=-1)
  934. start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
  935. end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
  936. total_loss = None
  937. if start_positions is not None and end_positions is not None:
  938. # If we are on multi-GPU, split add a dimension
  939. if len(start_positions.size()) > 1:
  940. start_positions = start_positions.squeeze(-1)
  941. if len(end_positions.size()) > 1:
  942. end_positions = end_positions.squeeze(-1)
  943. # sometimes the start/end positions are outside our model inputs, we ignore these terms
  944. ignored_index = start_logits.size(1)
  945. start_positions = start_positions.clamp(0, ignored_index)
  946. end_positions = end_positions.clamp(0, ignored_index)
  947. loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
  948. start_loss = loss_fct(start_logits, start_positions)
  949. end_loss = loss_fct(end_logits, end_positions)
  950. total_loss = (start_loss + end_loss) / 2
  951. if not return_dict:
  952. output = (start_logits, end_logits) + distilbert_output[1:]
  953. return ((total_loss,) + output) if total_loss is not None else output
  954. return QuestionAnsweringModelOutput(
  955. loss=total_loss,
  956. start_logits=start_logits,
  957. end_logits=end_logits,
  958. hidden_states=distilbert_output.hidden_states,
  959. attentions=distilbert_output.attentions,
  960. )
  961. @add_start_docstrings(
  962. """
  963. DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
  964. for Named-Entity-Recognition (NER) tasks.
  965. """,
  966. DISTILBERT_START_DOCSTRING,
  967. )
  968. class DistilBertForTokenClassification(DistilBertPreTrainedModel):
  969. def __init__(self, config: PretrainedConfig):
  970. super().__init__(config)
  971. self.num_labels = config.num_labels
  972. self.distilbert = DistilBertModel(config)
  973. self.dropout = nn.Dropout(config.dropout)
  974. self.classifier = nn.Linear(config.hidden_size, config.num_labels)
  975. # Initialize weights and apply final processing
  976. self.post_init()
  977. def get_position_embeddings(self) -> nn.Embedding:
  978. """
  979. Returns the position embeddings
  980. """
  981. return self.distilbert.get_position_embeddings()
  982. def resize_position_embeddings(self, new_num_position_embeddings: int):
  983. """
  984. Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
  985. Arguments:
  986. new_num_position_embeddings (`int`):
  987. The number of new position embedding matrix. If position embeddings are learned, increasing the size
  988. will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
  989. end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
  990. size will add correct vectors at the end following the position encoding algorithm, whereas reducing
  991. the size will remove vectors from the end.
  992. """
  993. self.distilbert.resize_position_embeddings(new_num_position_embeddings)
  994. @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
  995. @add_code_sample_docstrings(
  996. checkpoint=_CHECKPOINT_FOR_DOC,
  997. output_type=TokenClassifierOutput,
  998. config_class=_CONFIG_FOR_DOC,
  999. )
  1000. def forward(
  1001. self,
  1002. input_ids: Optional[torch.Tensor] = None,
  1003. attention_mask: Optional[torch.Tensor] = None,
  1004. head_mask: Optional[torch.Tensor] = None,
  1005. inputs_embeds: Optional[torch.Tensor] = None,
  1006. labels: Optional[torch.LongTensor] = None,
  1007. output_attentions: Optional[bool] = None,
  1008. output_hidden_states: Optional[bool] = None,
  1009. return_dict: Optional[bool] = None,
  1010. ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
  1011. r"""
  1012. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1013. Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
  1014. """
  1015. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1016. outputs = self.distilbert(
  1017. input_ids,
  1018. attention_mask=attention_mask,
  1019. head_mask=head_mask,
  1020. inputs_embeds=inputs_embeds,
  1021. output_attentions=output_attentions,
  1022. output_hidden_states=output_hidden_states,
  1023. return_dict=return_dict,
  1024. )
  1025. sequence_output = outputs[0]
  1026. sequence_output = self.dropout(sequence_output)
  1027. logits = self.classifier(sequence_output)
  1028. loss = None
  1029. if labels is not None:
  1030. loss_fct = CrossEntropyLoss()
  1031. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1032. if not return_dict:
  1033. output = (logits,) + outputs[1:]
  1034. return ((loss,) + output) if loss is not None else output
  1035. return TokenClassifierOutput(
  1036. loss=loss,
  1037. logits=logits,
  1038. hidden_states=outputs.hidden_states,
  1039. attentions=outputs.attentions,
  1040. )
  1041. @add_start_docstrings(
  1042. """
  1043. DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
  1044. a softmax) e.g. for RocStories/SWAG tasks.
  1045. """,
  1046. DISTILBERT_START_DOCSTRING,
  1047. )
  1048. class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
  1049. def __init__(self, config: PretrainedConfig):
  1050. super().__init__(config)
  1051. self.distilbert = DistilBertModel(config)
  1052. self.pre_classifier = nn.Linear(config.dim, config.dim)
  1053. self.classifier = nn.Linear(config.dim, 1)
  1054. self.dropout = nn.Dropout(config.seq_classif_dropout)
  1055. # Initialize weights and apply final processing
  1056. self.post_init()
  1057. def get_position_embeddings(self) -> nn.Embedding:
  1058. """
  1059. Returns the position embeddings
  1060. """
  1061. return self.distilbert.get_position_embeddings()
  1062. def resize_position_embeddings(self, new_num_position_embeddings: int):
  1063. """
  1064. Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
  1065. Arguments:
  1066. new_num_position_embeddings (`int`)
  1067. The number of new position embeddings. If position embeddings are learned, increasing the size will add
  1068. newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
  1069. position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
  1070. add correct vectors at the end following the position encoding algorithm, whereas reducing the size
  1071. will remove vectors from the end.
  1072. """
  1073. self.distilbert.resize_position_embeddings(new_num_position_embeddings)
  1074. @add_start_docstrings_to_model_forward(
  1075. DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
  1076. )
  1077. @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
  1078. def forward(
  1079. self,
  1080. input_ids: Optional[torch.Tensor] = None,
  1081. attention_mask: Optional[torch.Tensor] = None,
  1082. head_mask: Optional[torch.Tensor] = None,
  1083. inputs_embeds: Optional[torch.Tensor] = None,
  1084. labels: Optional[torch.LongTensor] = None,
  1085. output_attentions: Optional[bool] = None,
  1086. output_hidden_states: Optional[bool] = None,
  1087. return_dict: Optional[bool] = None,
  1088. ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
  1089. r"""
  1090. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1091. Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
  1092. num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
  1093. `input_ids` above)
  1094. Returns:
  1095. Examples:
  1096. ```python
  1097. >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
  1098. >>> import torch
  1099. >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
  1100. >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
  1101. >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
  1102. >>> choice0 = "It is eaten with a fork and a knife."
  1103. >>> choice1 = "It is eaten while held in the hand."
  1104. >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
  1105. >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
  1106. >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
  1107. >>> # the linear classifier still needs to be trained
  1108. >>> loss = outputs.loss
  1109. >>> logits = outputs.logits
  1110. ```"""
  1111. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1112. num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
  1113. input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
  1114. attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
  1115. inputs_embeds = (
  1116. inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
  1117. if inputs_embeds is not None
  1118. else None
  1119. )
  1120. outputs = self.distilbert(
  1121. input_ids,
  1122. attention_mask=attention_mask,
  1123. head_mask=head_mask,
  1124. inputs_embeds=inputs_embeds,
  1125. output_attentions=output_attentions,
  1126. output_hidden_states=output_hidden_states,
  1127. return_dict=return_dict,
  1128. )
  1129. hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
  1130. pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
  1131. pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
  1132. pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
  1133. pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
  1134. logits = self.classifier(pooled_output) # (bs * num_choices, 1)
  1135. reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
  1136. loss = None
  1137. if labels is not None:
  1138. loss_fct = CrossEntropyLoss()
  1139. loss = loss_fct(reshaped_logits, labels)
  1140. if not return_dict:
  1141. output = (reshaped_logits,) + outputs[1:]
  1142. return ((loss,) + output) if loss is not None else output
  1143. return MultipleChoiceModelOutput(
  1144. loss=loss,
  1145. logits=reshaped_logits,
  1146. hidden_states=outputs.hidden_states,
  1147. attentions=outputs.attentions,
  1148. )