modeling_splinter.py

# coding=utf-8
# Copyright 2021 Tel Aviv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Splinter model."""

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_splinter import SplinterConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "tau/splinter-base"
_CONFIG_FOR_DOC = "SplinterConfig"


class SplinterEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: Optional[int] = 0,
    ) -> Tuple:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter
class SplinterSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in SplinterModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
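

# Illustrative note (not part of the original file): when this module runs as decoder
# self-attention, `past_key_value` caches the already-projected key/value tensors of shape
# (batch_size, num_attention_heads, past_seq_length, attention_head_size), so each new step
# only projects the incoming tokens and concatenates along the sequence dimension, as in the
# forward pass above:
#
#     key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
#     value_layer = torch.cat([past_key_value[1], value_layer], dim=2)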


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter
class SplinterSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


SPLINTER_SELF_ATTENTION_CLASSES = {
    "eager": SplinterSelfAttention,
}


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter,BERT->SPLINTER
class SplinterAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = SPLINTER_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, position_embedding_type=position_embedding_type
        )
        self.output = SplinterSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter
class SplinterIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter
class SplinterOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter
class SplinterLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = SplinterAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = SplinterAttention(config, position_embedding_type="absolute")
        self.intermediate = SplinterIntermediate(config)
        self.output = SplinterOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
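

# Illustrative note (not part of the original file): when SplinterLayer runs as a decoder layer
# with cross-attention, its per-layer cache is effectively the 4-tuple
#     (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value)
# which is why `past_key_value[:2]` is routed to the self-attention block and
# `past_key_value[-2:]` to the cross-attention block in the forward pass above.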


# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter
class SplinterEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class SplinterPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SplinterConfig
    base_model_prefix = "splinter"
    supports_gradient_checkpointing = True

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


SPLINTER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`SplinterConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

SPLINTER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `{0}`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.",
    SPLINTER_START_DOCSTRING,
)
class SplinterModel(SplinterPreTrainedModel):
    """
    The model is an encoder (with only self-attention) following the architecture described in [Attention is all you
    need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
    Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = SplinterEmbeddings(config)
        self.encoder = SplinterEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=sequence_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
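

# A minimal usage sketch (not part of the original file), assuming the `tau/splinter-base`
# checkpoint referenced by `_CHECKPOINT_FOR_DOC` above and the standard Auto classes:
#
#     from transformers import AutoTokenizer, SplinterModel
#
#     tokenizer = AutoTokenizer.from_pretrained("tau/splinter-base")
#     model = SplinterModel.from_pretrained("tau/splinter-base")
#     inputs = tokenizer("Splinter is pretrained with recurring span selection.", return_tensors="pt")
#     outputs = model(**inputs)
#     last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)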


class SplinterFullyConnectedLayer(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_act="gelu"):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim

        self.dense = nn.Linear(self.input_dim, self.output_dim)
        self.act_fn = ACT2FN[hidden_act]
        self.LayerNorm = nn.LayerNorm(self.output_dim)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(inputs)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class QuestionAwareSpanSelectionHead(nn.Module):
    """
    Implementation of the Question-Aware Span Selection (QASS) head, described in Splinter's paper
    ("Few-Shot Question Answering by Pretraining Span Selection").
    """

    def __init__(self, config):
        super().__init__()

        self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
        self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
        self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)
        self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size)

        self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False)

    def forward(self, inputs, positions):
        _, _, dim = inputs.size()
        index = positions.unsqueeze(-1).repeat(1, 1, dim)  # [batch_size, num_positions, dim]
        gathered_reps = torch.gather(inputs, dim=1, index=index)  # [batch_size, num_positions, dim]

        query_start_reps = self.query_start_transform(gathered_reps)  # [batch_size, num_positions, dim]
        query_end_reps = self.query_end_transform(gathered_reps)  # [batch_size, num_positions, dim]
        start_reps = self.start_transform(inputs)  # [batch_size, seq_length, dim]
        end_reps = self.end_transform(inputs)  # [batch_size, seq_length, dim]

        hidden_states = self.start_classifier(query_start_reps)  # [batch_size, num_positions, dim]
        start_reps = start_reps.permute(0, 2, 1)  # [batch_size, dim, seq_length]
        start_logits = torch.matmul(hidden_states, start_reps)

        hidden_states = self.end_classifier(query_end_reps)
        end_reps = end_reps.permute(0, 2, 1)
        end_logits = torch.matmul(hidden_states, end_reps)

        return start_logits, end_logits
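

# Shape walkthrough for the QASS head above (illustrative note, not part of the original file):
#   inputs:    (batch_size, seq_length, dim)  encoder hidden states
#   positions: (batch_size, num_positions)    indices of the question tokens
# The gathered question-token representations are transformed and multiplied back against the
# (transformed) token representations, so that
#   start_logits, end_logits: (batch_size, num_positions, seq_length)
# i.e. every question token scores every position in the sequence as a candidate span start/end.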


@add_start_docstrings(
    """
    Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    SPLINTER_START_DOCSTRING,
)
class SplinterForQuestionAnswering(SplinterPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.splinter = SplinterModel(config)
        self.splinter_qass = QuestionAwareSpanSelectionHead(config)
        self.question_token_id = config.question_token_id

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        question_positions: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
            The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size,
            num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be
            the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size,
            sequence_length)`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        question_positions_were_none = False
        if question_positions is None:
            if input_ids is not None:
                question_position_for_each_example = torch.argmax(
                    (torch.eq(input_ids, self.question_token_id)).int(), dim=-1
                )
            else:
                question_position_for_each_example = torch.zeros(
                    inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device
                )
            question_positions = question_position_for_each_example.unsqueeze(-1)
            question_positions_were_none = True

        outputs = self.splinter(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)

        if question_positions_were_none:
            start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1)

        if attention_mask is not None:
            start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min
            end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
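

# A minimal extractive-QA sketch (not part of the original file). It assumes a Splinter
# checkpoint whose tokenizer inserts the special [QUESTION] token when encoding a
# question/context pair; the exact checkpoint name below is an assumption, not taken from
# this file:
#
#     from transformers import AutoTokenizer, SplinterForQuestionAnswering
#     import torch
#
#     tokenizer = AutoTokenizer.from_pretrained("tau/splinter-base-qass")  # assumed checkpoint name
#     model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass")
#
#     question = "Who developed Splinter?"
#     context = "Splinter was developed by researchers at Tel Aviv University and AllenAI."
#     inputs = tokenizer(question, context, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     start = int(outputs.start_logits.argmax(-1))
#     end = int(outputs.end_logits.argmax(-1))
#     answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])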


@dataclass
class SplinterForPreTrainingOutput(ModelOutput):
    """
    Class for outputs of Splinter as a span selection model.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@add_start_docstrings(
    """
    Splinter Model for the recurring span selection task as done during the pretraining. The difference to the QA task
    is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans
    instead.
    """,
    SPLINTER_START_DOCSTRING,
)
class SplinterForPreTraining(SplinterPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.splinter = SplinterModel(config)
        self.splinter_qass = QuestionAwareSpanSelectionHead(config)
        self.question_token_id = config.question_token_id

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length")
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        question_positions: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, SplinterForPreTrainingOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
            The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size,
            num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be
            the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size,
            sequence_length)`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if question_positions is None and start_positions is not None and end_positions is not None:
            raise TypeError("question_positions must be specified in order to calculate the loss")
        elif question_positions is None and input_ids is None:
            raise TypeError("question_positions must be specified when inputs_embeds is used")
        elif question_positions is None:
            question_positions = self._prepare_question_positions(input_ids)

        outputs = self.splinter(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        batch_size, sequence_length, dim = sequence_output.size()
        # [batch_size, num_questions, sequence_length]
        start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)

        num_questions = question_positions.size(1)
        if attention_mask is not None:
            attention_mask_for_each_question = attention_mask.unsqueeze(1).expand(
                batch_size, num_questions, sequence_length
            )
            start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min
            end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min

        total_loss = None
        # [batch_size, num_questions, sequence_length]
        if start_positions is not None and end_positions is not None:
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            start_positions.clamp_(0, max(0, sequence_length - 1))
            end_positions.clamp_(0, max(0, sequence_length - 1))

            # Ignore zero positions in the loss. Splinter never predicts zero
            # during pretraining and zero is used for padding question
            # tokens as well as for start and end positions of padded
            # question tokens.
            loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id)
            start_loss = loss_fct(
                start_logits.view(batch_size * num_questions, sequence_length),
                start_positions.view(batch_size * num_questions),
            )
            end_loss = loss_fct(
                end_logits.view(batch_size * num_questions, sequence_length),
                end_positions.view(batch_size * num_questions),
            )
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return SplinterForPreTrainingOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor:
        rows, flat_positions = torch.where(input_ids == self.config.question_token_id)
        num_questions = torch.bincount(rows)
        positions = torch.full(
            (input_ids.size(0), num_questions.max()),
            self.config.pad_token_id,
            dtype=torch.long,
            device=input_ids.device,
        )
        cols = torch.cat([torch.arange(n) for n in num_questions])
        positions[rows, cols] = flat_positions
        return positions
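

# Illustrative example (not part of the original file) of what `_prepare_question_positions`
# returns. With `question_token_id = 104` and `pad_token_id = 0` (the concrete values here are
# assumptions), an input such as
#
#     input_ids = [[  7, 104,   9, 104],
#                  [104,   5,   6,   7]]
#
# yields per-row question positions, right-padded with `pad_token_id` to the largest question
# count in the batch:
#
#     [[1, 3],
#      [0, 0]]
#
# The second row has a single question at position 0; its trailing 0 is padding, which is why
# the pretraining loss above uses `ignore_index=self.config.pad_token_id`.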