# coding=utf-8
# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch FNet model."""

import warnings
from dataclasses import dataclass
from functools import partial
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...utils import is_scipy_available


if is_scipy_available():
    from scipy import linalg

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    ModelOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_fnet import FNetConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "google/fnet-base"
_CONFIG_FOR_DOC = "FNetConfig"


# Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py
def _two_dim_matmul(x, matrix_dim_one, matrix_dim_two):
    """Applies 2D matrix multiplication to 3D input arrays."""
    seq_length = x.shape[1]
    matrix_dim_one = matrix_dim_one[:seq_length, :seq_length]
    x = x.type(torch.complex64)
    return torch.einsum("bij,jk,ni->bnk", x, matrix_dim_two, matrix_dim_one)


# Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py
def two_dim_matmul(x, matrix_dim_one, matrix_dim_two):
    return _two_dim_matmul(x, matrix_dim_one, matrix_dim_two)


# Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py
def fftn(x):
    """
    Applies n-dimensional Fast Fourier Transform (FFT) to input array.

    Args:
        x: Input n-dimensional array.

    Returns:
        n-dimensional Fourier transform of input n-dimensional array.
    """
    out = x
    for axis in reversed(range(x.ndim)[1:]):  # We don't need to apply FFT to the batch axis (axis 0)
        out = torch.fft.fft(out, axis=axis)
    return out
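

# Illustrative sketch (not used by the model): for the usual (batch, seq_len, hidden) activations,
# the manual loop in `fftn` above is equivalent to calling `torch.fft.fftn` over the non-batch
# dimensions, which is also the default path taken by `FNetBasicFourierTransform` below when TPU
# optimizations are disabled.
#
#     x = torch.randn(2, 8, 16)
#     assert torch.allclose(fftn(x), torch.fft.fftn(x, dim=(1, 2)), rtol=1e-4, atol=1e-4)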


class FNetEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # NOTE: This is the projection layer and is needed. The original code allows for different embedding and
        # model dimensions.
        self.projection = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        # Set token_type_ids to the all-zeros buffer registered in the constructor, which is the usual case when
        # it is auto-generated. The registered buffer helps users trace the model without passing token_type_ids
        # and solves issue #5664.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.projection(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class FNetBasicFourierTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self._init_fourier_transform(config)

    def _init_fourier_transform(self, config):
        if not config.use_tpu_fourier_optimizations:
            self.fourier_transform = partial(torch.fft.fftn, dim=(1, 2))
        elif config.max_position_embeddings <= 4096:
            if is_scipy_available():
                self.register_buffer(
                    "dft_mat_hidden", torch.tensor(linalg.dft(config.hidden_size), dtype=torch.complex64)
                )
                self.register_buffer(
                    "dft_mat_seq", torch.tensor(linalg.dft(config.tpu_short_seq_length), dtype=torch.complex64)
                )
                self.fourier_transform = partial(
                    two_dim_matmul, matrix_dim_one=self.dft_mat_seq, matrix_dim_two=self.dft_mat_hidden
                )
            else:
                logger.warning(
                    "SciPy is needed for DFT matrix calculation but was not found. Using TPU-optimized fast Fourier"
                    " transform instead."
                )
                self.fourier_transform = fftn
        else:
            self.fourier_transform = fftn

    def forward(self, hidden_states):
        # NOTE: We do not use torch.vmap as it is not integrated into PyTorch stable versions.
        # Interested users can modify the code to use vmap from the nightly versions, getting the vmap from here:
        # https://pytorch.org/docs/master/generated/torch.vmap.html. Note that the fourier transform methods will
        # need to change accordingly.
        outputs = self.fourier_transform(hidden_states).real
        return (outputs,)
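

# Illustrative sketch (assumes the default, non-TPU configuration): the sub-layer above replaces
# self-attention with a parameter-free 2D Fourier transform over the sequence and hidden dimensions
# and keeps only the real part of the result.
#
#     hidden_states = torch.randn(2, 8, 16)  # (batch, seq_len, hidden)
#     mixed = torch.fft.fftn(hidden_states, dim=(1, 2)).real
#     assert mixed.shape == hidden_states.shape  # token mixing preserves the shape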


class FNetBasicOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.LayerNorm(input_tensor + hidden_states)
        return hidden_states


class FNetFourierTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = FNetBasicFourierTransform(config)
        self.output = FNetBasicOutput(config)

    def forward(self, hidden_states):
        self_outputs = self.self(hidden_states)
        fourier_output = self.output(self_outputs[0], hidden_states)
        outputs = (fourier_output,)
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->FNet
class FNetIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->FNet
class FNetOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class FNetLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1  # The dimension which has the sequence length
        self.fourier = FNetFourierTransform(config)
        self.intermediate = FNetIntermediate(config)
        self.output = FNetOutput(config)

    def forward(self, hidden_states):
        self_fourier_outputs = self.fourier(hidden_states)
        fourier_output = self_fourier_outputs[0]

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, fourier_output
        )

        outputs = (layer_output,)
        return outputs

    def feed_forward_chunk(self, fourier_output):
        intermediate_output = self.intermediate(fourier_output)
        layer_output = self.output(intermediate_output, fourier_output)
        return layer_output
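

# Rough sketch (not the actual implementation) of what `apply_chunking_to_forward` does for the
# feed-forward above: with `chunk_size_feed_forward > 0`, the sequence dimension is split into
# chunks that are processed independently and re-concatenated, trading compute for memory; with the
# default chunk size of 0 it is just `self.feed_forward_chunk(fourier_output)`.
#
#     def _chunked_feed_forward(layer, fourier_output, chunk_size):
#         if chunk_size == 0:
#             return layer.feed_forward_chunk(fourier_output)
#         chunks = fourier_output.split(chunk_size, dim=layer.seq_len_dim)
#         return torch.cat([layer.feed_forward_chunk(c) for c in chunks], dim=layer.seq_len_dim)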


class FNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([FNetLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states)
            else:
                layer_outputs = layer_module(hidden_states)

            hidden_states = layer_outputs[0]

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->FNet
class FNetPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->FNet
class FNetPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class FNetLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = FNetPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states

    def _tie_weights(self) -> None:
        # For accelerate compatibility and to not break backward compatibility
        if self.decoder.bias.device.type == "meta":
            self.decoder.bias = self.bias
        else:
            # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
            self.bias = self.decoder.bias


class FNetOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = FNetLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->FNet
class FNetOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->FNet
class FNetPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = FNetLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class FNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FNetConfig
    base_model_prefix = "fnet"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            # NOTE: Original code uses same initialization as weights for biases as well.
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@dataclass
class FNetForPreTrainingOutput(ModelOutput):
    """
    Output type of [`FNetForPreTraining`].

    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


FNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`FNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

FNET_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare FNet Model transformer outputting raw hidden-states without any specific head on top.",
    FNET_START_DOCSTRING,
)
class FNetModel(FNetPreTrainedModel):
    """
    The model can behave as an encoder, following the architecture described in [FNet: Mixing Tokens with Fourier
    Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = FNetEmbeddings(config)
        self.encoder = FNetEncoder(config)

        self.pooler = FNetPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if (
            self.config.use_tpu_fourier_optimizations
            and seq_length <= 4096
            and self.config.tpu_short_seq_length != seq_length
        ):
            raise ValueError(
                "The `tpu_short_seq_length` in FNetConfig should be set equal to the sequence length being passed to"
                " the model when using TPU optimizations."
            )

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        pooler_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooler_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
        )
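

# Minimal usage sketch for the bare encoder (assumes the `google/fnet-base` checkpoint is available
# locally or can be downloaded):
#
#     from transformers import AutoTokenizer, FNetModel
#
#     tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
#     model = FNetModel.from_pretrained("google/fnet-base")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#     last_hidden_state = model(**inputs).last_hidden_state  # (batch, seq_len, hidden_size)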


@add_start_docstrings(
    """
    FNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    FNET_START_DOCSTRING,
)
class FNetForPreTraining(FNetPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.fnet = FNetModel(config)
        self.cls = FNetPreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=FNetForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        next_sentence_label: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FNetForPreTrainingOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring) Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, FNetForPreTraining
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
        >>> model = FNetForPreTraining.from_pretrained("google/fnet-base")
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return FNetForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings("""FNet Model with a `language modeling` head on top.""", FNET_START_DOCSTRING)
class FNetForMaskedLM(FNetPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.fnet = FNetModel(config)
        self.cls = FNetOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states)
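

# Minimal masked-LM usage sketch (assumes `google/fnet-base` and that the tokenizer defines a mask
# token):
#
#     from transformers import AutoTokenizer, FNetForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
#     model = FNetForMaskedLM.from_pretrained("google/fnet-base")
#     inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="pt")
#     logits = model(**inputs).logits
#     mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
#     predicted_token = tokenizer.decode(logits[0, mask_index].argmax(-1))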


@add_start_docstrings(
    """FNet Model with a `next sentence prediction (classification)` head on top.""",
    FNET_START_DOCSTRING,
)
class FNetForNextSentencePrediction(FNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.fnet = FNetModel(config)
        self.cls = FNetOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, NextSentencePredictorOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, FNetForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
        >>> model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base")
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```"""
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
                " `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    FNET_START_DOCSTRING,
)
class FNetForSequenceClassification(FNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.fnet = FNetModel(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
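

# Sketch of how `problem_type` above is inferred when it is not set in the config (illustrative
# label tensors only): a single float target per example means regression, integer class indices
# mean single-label classification, and float multi-hot targets mean multi-label classification.
#
#     labels_regression = torch.tensor([0.7, 1.2])                     # num_labels == 1 -> MSELoss
#     labels_single = torch.tensor([0, 2])                             # int dtype -> CrossEntropyLoss
#     labels_multi = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])  # float multi-hot -> BCEWithLogitsLoss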


@add_start_docstrings(
    """
    FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    FNET_START_DOCSTRING,
)
class FNetForMultipleChoice(FNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.fnet = FNetModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    FNET_START_DOCSTRING,
)
class FNetForTokenClassification(FNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.fnet = FNetModel(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    FNET_START_DOCSTRING,
)
class FNetForQuestionAnswering(FNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels

        self.fnet = FNetModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.fnet(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states
        )
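

# Sketch of decoding an answer span from the start/end logits above (illustrative only; real
# pipelines also mask out positions that fall inside the question or padding). Assumes `inputs` and
# `tokenizer` come from tokenizing a question-context pair with `return_tensors="pt"`:
#
#     start_index = int(start_logits[0].argmax())
#     end_index = int(end_logits[0].argmax())
#     answer = tokenizer.decode(inputs.input_ids[0, start_index : end_index + 1])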