modeling_tapas.py
# coding=utf-8
# Copyright 2020 Google Research and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch TAPAS model."""

import enum
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import (
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    is_torch_greater_or_equal_than_1_12,
    prune_linear_layer,
)
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_tapas import TapasConfig


logger = logging.get_logger(__name__)

if not is_torch_greater_or_equal_than_1_12:
    logger.warning(
        f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
        "TapasModel. Please upgrade torch."
    )

_CONFIG_FOR_DOC = "TapasConfig"
_CHECKPOINT_FOR_DOC = "google/tapas-base"

EPSILON_ZERO_DIVISION = 1e-10
CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0

@dataclass
class TableQuestionAnsweringOutput(ModelOutput):
    """
    Output type of [`TapasForQuestionAnswering`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)):
            Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
            semi-supervised regression loss and (optionally) supervised loss for aggregations.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Prediction scores of the cell selection head, for every token.
        logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
            Prediction scores of the aggregation head, for every aggregation operator.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    logits_aggregation: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

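# Illustrative usage sketch for the output fields above (`model`, `inputs` and `labels` are hypothetical objects,
# not defined in this module):
#
#     outputs = model(**inputs, labels=labels)
#     token_scores = outputs.logits              # (batch_size, sequence_length), cell selection scores per token
#     agg_scores = outputs.logits_aggregation    # (batch_size, num_aggregation_labels), if an aggregation head is used
#     total_loss = outputs.loss                  # only populated when supervision targets are provided
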
def load_tf_weights_in_tapas(model, config, tf_checkpoint_path):
    """
    Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert

    - add cell selection and aggregation heads
    - take into account additional token type embedding layers
    """
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if any(
            n
            in [
                "adam_v",
                "adam_m",
                "AdamWeightDecayOptimizer",
                "AdamWeightDecayOptimizer_1",
                "global_step",
                "seq_relationship",
            ]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights
        # since these are not used for classification
        if isinstance(model, TapasForSequenceClassification):
            if any(n in ["output_bias", "output_weights"] for n in name):
                logger.info(f"Skipping {'/'.join(name)}")
                continue
        # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls
        # since this model does not have MLM and NSP heads
        if isinstance(model, TapasModel):
            if any(n in ["output_bias", "output_weights", "output_bias_cls", "output_weights_cls"] for n in name):
                logger.info(f"Skipping {'/'.join(name)}")
                continue
        # in case the model is TapasForMaskedLM, we skip the pooler
        if isinstance(model, TapasForMaskedLM):
            if any(n in ["pooler"] for n in name):
                logger.info(f"Skipping {'/'.join(name)}")
                continue
        # if first scope name starts with "bert", change it to "tapas"
        if name[0] == "bert":
            name[0] = "tapas"
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            # cell selection heads
            elif scope_names[0] == "output_bias":
                if not isinstance(model, TapasForMaskedLM):
                    pointer = getattr(pointer, "output_bias")
                else:
                    pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "output_weights")
            elif scope_names[0] == "column_output_bias":
                pointer = getattr(pointer, "column_output_bias")
            elif scope_names[0] == "column_output_weights":
                pointer = getattr(pointer, "column_output_weights")
            # aggregation head
            elif scope_names[0] == "output_bias_agg":
                pointer = getattr(pointer, "aggregation_classifier")
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights_agg":
                pointer = getattr(pointer, "aggregation_classifier")
                pointer = getattr(pointer, "weight")
            # classification head
            elif scope_names[0] == "output_bias_cls":
                pointer = getattr(pointer, "classifier")
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights_cls":
                pointer = getattr(pointer, "classifier")
                pointer = getattr(pointer, "weight")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name[-13:] in [f"_embeddings_{i}" for i in range(7)]:
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be
        # scalar => should first be converted to numpy arrays)
        if np.isscalar(array):
            array = np.array(array)
        pointer.data = torch.from_numpy(array)
    return model

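# Hedged conversion sketch for the loader above: assuming a locally available TensorFlow checkpoint
# (the path below is illustrative only), the conversion could look roughly like this:
#
#     from transformers import TapasConfig, TapasForQuestionAnswering
#
#     config = TapasConfig()
#     model = TapasForQuestionAnswering(config)
#     model = load_tf_weights_in_tapas(model, config, "/path/to/tapas/model.ckpt")  # hypothetical path
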
class TapasEmbeddings(nn.Module):
    """
    Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number
    of additional token type embeddings to encode tabular structure.
    """

    def __init__(self, config):
        super().__init__()
        # we do not include config.disabled_features and config.disable_position_embeddings from the original implementation
        # word embeddings
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        # position embeddings
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # token type embeddings
        for i, type_vocab_sizes in enumerate(config.type_vocab_sizes):
            name = f"token_type_embeddings_{i}"
            setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size))

        self.number_of_token_type_embeddings = len(config.type_vocab_sizes)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.config = config

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if position_ids is None:
            # create absolute position embeddings
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
            # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings
            if self.config.reset_position_index_per_cell:
                # shape (batch_size, seq_len)
                col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)
                # shape (batch_size, seq_len)
                row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)
                # shape (batch_size, seq_len)
                full_index = ProductIndexMap(col_index, row_index)
                # shape (max_rows * max_columns,). First absolute position for every cell
                first_position_per_segment = reduce_min(position_ids, full_index)[0]
                # ? shape (batch_size, seq_len). First absolute position of the cell for every token
                first_position = gather(first_position_per_segment, full_index)
                # shape (1, seq_len)
                position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
                position_ids = torch.min(
                    torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position
                )

        if token_type_ids is None:
            token_type_ids = torch.zeros(
                (*input_shape, self.number_of_token_type_embeddings), dtype=torch.long, device=device
            )

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)

        embeddings = inputs_embeds + position_embeddings
        for i in range(self.number_of_token_type_embeddings):
            name = f"token_type_embeddings_{i}"
            embeddings += getattr(self, name)(token_type_ids[:, :, i])

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings

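# Shape sketch for the embeddings above: `token_type_ids` carries one id per token type vocabulary in
# `config.type_vocab_sizes` (7 with the default configuration); index 1 is the column index and index 2 the row
# index, as used by the relative-position reset logic in `forward`. A hedged illustration with hypothetical sizes:
#
#     input_ids.shape       == (2, 512)
#     token_type_ids.shape  == (2, 512, 7)
#     embeddings = TapasEmbeddings(config)(input_ids=input_ids, token_type_ids=token_type_ids)
#     embeddings.shape      == (2, 512, config.hidden_size)
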
class TapasSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k, v from the cross-attention cache
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TapasModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs

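# Shape flow through TapasSelfAttention.forward above (plain self-attention case):
#
#     hidden_states:     (batch_size, seq_len, hidden_size)
#     query/key/value:   (batch_size, num_attention_heads, seq_len, attention_head_size)  # via transpose_for_scores
#     attention_scores:  (batch_size, num_attention_heads, seq_len, seq_len)
#     context_layer:     (batch_size, seq_len, all_head_size)  # after permute + reshape
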
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class TapasSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class TapasAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = TapasSelfAttention(config)
        self.output = TapasSelfOutput(config)
        self.pruned_heads = set()

    # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    # Copied from transformers.models.bert.modeling_bert.BertAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class TapasIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput
class TapasOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class TapasLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = TapasAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = TapasAttention(config)
        self.intermediate = TapasIntermediate(config)
        self.output = TapasOutput(config)

    # Copied from transformers.models.bert.modeling_bert.BertLayer.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk
    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

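# Cache layout note for TapasLayer.forward above: when the layer acts as a decoder, `past_key_value` holds the
# self-attention key/value states first (`past_key_value[:2]`) and, when cross-attention is added, the
# cross-attention key/value states last (`past_key_value[-2:]`); `present_key_value` is returned in the same order.
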
class TapasEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([TapasLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_values,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_values,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )

# Copied from transformers.models.bert.modeling_bert.BertPooler
class TapasPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Tapas
class TapasPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Tapas
class TapasLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = TapasPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def _tie_weights(self):
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas
class TapasOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = TapasLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores

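# Weight-tying note for the MLM head above: `TapasLMPredictionHead.decoder` shares its weight matrix with the
# input word embeddings (see `_tied_weights_keys` on `TapasForMaskedLM` below), while the per-token `bias` is a
# separate parameter that `_tie_weights` keeps attached to the decoder so it is resized with the vocabulary.
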
class TapasPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = TapasConfig
    base_model_prefix = "tapas"
    supports_gradient_checkpointing = True
    _supports_param_buffer_assignment = False

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

TAPAS_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`TapasConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

TAPAS_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0}, 7)`, *optional*):
            Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
            class for more info.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. If
            `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
            used. Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.",
    TAPAS_START_DOCSTRING,
)
class TapasModel(TapasPreTrainedModel):
    """
    This class is a small change compared to [`BertModel`], taking into account the additional token type ids.

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = TapasEmbeddings(config)
        self.encoder = TapasEncoder(config)

        self.pooler = TapasPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TapasModel
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
        >>> model = TapasModel.from_pretrained("google/tapas-base")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)
        >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]

        >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device
            )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

@add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING)
class TapasForMaskedLM(TapasPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
    config_class = TapasConfig
    base_model_prefix = "tapas"

    def __init__(self, config):
        super().__init__(config)

        self.tapas = TapasModel(config, add_pooling_layer=False)
        self.cls = TapasOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TapasForMaskedLM
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
        >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)

        >>> inputs = tokenizer(
        ...     table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt"
        ... )
        >>> labels = tokenizer(
        ...     table=table, queries="How many movies has George Clooney played in?", return_tensors="pt"
        ... )["input_ids"]

        >>> outputs = model(**inputs, labels=labels)
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.tapas(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

  908. @add_start_docstrings(
  909. """
  910. Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables
  911. (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for
  912. SQA, WTQ or WikiSQL-supervised tasks.
  913. """,
  914. TAPAS_START_DOCSTRING,
  915. )
  916. class TapasForQuestionAnswering(TapasPreTrainedModel):
  917. def __init__(self, config: TapasConfig):
  918. super().__init__(config)
  919. # base model
  920. self.tapas = TapasModel(config)
  921. # dropout (only used when training)
  922. self.dropout = nn.Dropout(config.hidden_dropout_prob)
  923. # cell selection heads
  924. if config.init_cell_selection_weights_to_zero:
  925. # init_cell_selection_weights_to_zero: Whether the initial weights should be
  926. # set to 0. This ensures that all tokens have the same prior probability.
  927. self.output_weights = nn.Parameter(torch.zeros(config.hidden_size))
  928. self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size))
  929. else:
  930. self.output_weights = nn.Parameter(torch.empty(config.hidden_size))
  931. nn.init.normal_(
  932. self.output_weights, std=config.initializer_range
  933. ) # here, a truncated normal is used in the original implementation
  934. self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size))
  935. nn.init.normal_(
  936. self.column_output_weights, std=config.initializer_range
  937. ) # here, a truncated normal is used in the original implementation
  938. self.output_bias = nn.Parameter(torch.zeros([]))
  939. self.column_output_bias = nn.Parameter(torch.zeros([]))
  940. # aggregation head
  941. if config.num_aggregation_labels > 0:
  942. self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels)
  943. # Initialize weights and apply final processing
  944. self.post_init()
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        table_mask: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        aggregation_labels: Optional[torch.LongTensor] = None,
        float_answer: Optional[torch.FloatTensor] = None,
        numeric_values: Optional[torch.FloatTensor] = None,
        numeric_values_scale: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TableQuestionAnsweringOutput]:
        r"""
        table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
            Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and
            padding are 0.
        labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
            Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the
            answer appearing in the table. Can be obtained using [`AutoTokenizer`].

            - 1 for tokens that are **part of the answer**,
            - 0 for tokens that are **not part of the answer**.

        aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
            Aggregation function index for every example in the batch for computing the aggregation loss. Indices
            should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for
            aggregation (WikiSQL-supervised).
        float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*):
            Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only
            required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
            Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using
            [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the
            regression loss.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
            Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case
            of weak supervision for aggregation (WTQ) to calculate the regression loss.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TapasForQuestionAnswering
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
        >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)
        >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]

        >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> logits_aggregation = outputs.logits_aggregation
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.tapas(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        pooled_output = outputs[1]

        sequence_output = self.dropout(sequence_output)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Construct indices for the table.
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device
            )

        token_types = [
            "segment_ids",
            "column_ids",
            "row_ids",
            "prev_labels",
            "column_ranks",
            "inv_column_ranks",
            "numeric_relations",
        ]

        row_ids = token_type_ids[:, :, token_types.index("row_ids")]
        column_ids = token_type_ids[:, :, token_types.index("column_ids")]

        row_index = IndexMap(
            indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)),
            num_segments=self.config.max_num_rows,
            batch_dims=1,
        )
        col_index = IndexMap(
            indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)),
            num_segments=self.config.max_num_columns,
            batch_dims=1,
        )
        cell_index = ProductIndexMap(row_index, col_index)

        # Masks.
        input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        # Table cells only, without question tokens and table headers.
        if table_mask is None:
            table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids))
        # torch.FloatTensor[batch_size, seq_length]
        input_mask_float = attention_mask.float().to(device)
        table_mask_float = table_mask.float().to(device)
        # Mask for cells that exist in the table (i.e. that are not padding).
        cell_mask, _ = reduce_mean(input_mask_float, cell_index)

        # Compute logits per token. These are used to select individual cells.
        logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias)

        # Compute logits per column. These are used to select a column.
        column_logits = None
        if self.config.select_one_column:
            column_logits = compute_column_logits(
                sequence_output,
                self.column_output_weights,
                self.column_output_bias,
                cell_index,
                cell_mask,
                self.config.allow_empty_column_selection,
            )

        # Aggregation logits
        logits_aggregation = None
        if self.config.num_aggregation_labels > 0:
            logits_aggregation = self.aggregation_classifier(pooled_output)

        # Total loss calculation
        total_loss = 0.0
        calculate_loss = False
        if labels is not None:
            calculate_loss = True
            is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision

            # Semi-supervised cell selection in case of no aggregation:
            # If the answer (the denotation) appears directly in the table we might
            # select the answer without applying any aggregation function. There are
            # some ambiguous cases, see utils._calculate_aggregate_mask for more info.
            # `aggregate_mask` is 1 for examples where we chose to aggregate and 0
            # for examples where we chose to select the answer directly.
            # `labels` encodes the positions of the answer appearing in the table.
            if is_supervised:
                aggregate_mask = None
            else:
                if float_answer is not None:
                    assert (
                        labels.shape[0] == float_answer.shape[0]
                    ), "Make sure the answers are a FloatTensor of shape (batch_size,)"
                    # <float32>[batch_size]
                    aggregate_mask = _calculate_aggregate_mask(
                        float_answer,
                        pooled_output,
                        self.config.cell_selection_preference,
                        labels,
                        self.aggregation_classifier,
                    )
                else:
                    raise ValueError("You have to specify float answers in order to calculate the aggregate mask")

            # Cell selection log-likelihood
            if self.config.average_logits_per_cell:
                logits_per_cell, _ = reduce_mean(logits, cell_index)
                logits = gather(logits_per_cell, cell_index)
            dist_per_token = torch.distributions.Bernoulli(logits=logits)

            # Compute cell selection loss per example.
            selection_loss_per_example = None
            if not self.config.select_one_column:
                weight = torch.where(
                    labels == 0,
                    torch.ones_like(labels, dtype=torch.float32),
                    self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32),
                )
                selection_loss_per_token = -dist_per_token.log_prob(labels) * weight
                selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / (
                    torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION
                )
            else:
                selection_loss_per_example, logits = _single_column_cell_selection_loss(
                    logits, column_logits, labels, cell_index, col_index, cell_mask
                )
                dist_per_token = torch.distributions.Bernoulli(logits=logits)

            # Supervised cell selection
            if self.config.disable_per_token_loss:
                pass
            elif is_supervised:
                total_loss += torch.mean(selection_loss_per_example)
            else:
                # For the not supervised case, do not assign loss for cell selection
                total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask))

            # Semi-supervised regression loss and supervised loss for aggregations
            if self.config.num_aggregation_labels > 0:
                if is_supervised:
                    # Note that `aggregate_mask` is None if the setting is supervised.
                    if aggregation_labels is not None:
                        assert (
                            labels.shape[0] == aggregation_labels.shape[0]
                        ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)"
                        per_example_additional_loss = _calculate_aggregation_loss(
                            logits_aggregation,
                            aggregate_mask,
                            aggregation_labels,
                            self.config.use_answer_as_supervision,
                            self.config.num_aggregation_labels,
                            self.config.aggregation_loss_weight,
                        )
                    else:
                        raise ValueError(
                            "You have to specify aggregation labels in order to calculate the aggregation loss"
                        )
                else:
                    # Set aggregation labels to zeros
                    aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device)
                    per_example_additional_loss = _calculate_aggregation_loss(
                        logits_aggregation,
                        aggregate_mask,
                        aggregation_labels,
                        self.config.use_answer_as_supervision,
                        self.config.num_aggregation_labels,
                        self.config.aggregation_loss_weight,
                    )

                if self.config.use_answer_as_supervision:
                    if numeric_values is not None and numeric_values_scale is not None:
                        assert numeric_values.shape == numeric_values_scale.shape
                        # Add regression loss for numeric answers which require aggregation.
                        answer_loss, large_answer_loss_mask = _calculate_regression_loss(
                            float_answer,
                            aggregate_mask,
                            dist_per_token,
                            numeric_values,
                            numeric_values_scale,
                            table_mask_float,
                            logits_aggregation,
                            self.config,
                        )
                        per_example_additional_loss += answer_loss
                        # Zero loss for examples with answer_loss > cutoff.
                        per_example_additional_loss *= large_answer_loss_mask
                    else:
                        raise ValueError(
                            "You have to specify numeric values and numeric values scale in order to calculate the"
                            " regression loss"
                        )

                total_loss += torch.mean(per_example_additional_loss)

        else:
            # if no label ids are provided, set them to zeros in order to properly compute logits
            labels = torch.zeros_like(logits)
            _, logits = _single_column_cell_selection_loss(
                logits, column_logits, labels, cell_index, col_index, cell_mask
            )

        if not return_dict:
            output = (logits, logits_aggregation) + outputs[2:]
            return ((total_loss,) + output) if calculate_loss else output

        return TableQuestionAnsweringOutput(
            loss=total_loss if calculate_loss else None,
            logits=logits,
            logits_aggregation=logits_aggregation,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
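
# Note (added for clarity, not part of the original TAPAS code): at inference time the returned
# `logits` / `logits_aggregation` are typically not consumed directly but post-processed into cell
# coordinates and an aggregation operator index, e.g. with the TAPAS tokenizer's
# `convert_logits_to_predictions` helper.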


@add_start_docstrings(
    """
    Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table
    entailment tasks, such as TabFact (Chen et al., 2020).
    """,
    TAPAS_START_DOCSTRING,
)
class TapasForSequenceClassification(TapasPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.tapas = TapasModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called
            "classification_class_index" in the original implementation.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TapasForSequenceClassification
        >>> import torch
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact")
        >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)
        >>> queries = [
        ...     "There is only one actor who is 45 years old",
        ...     "There are 3 actors which played in more than 60 movies",
        ... ]

        >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
        >>> labels = torch.tensor([1, 0])  # 1 means entailed, 0 means refuted

        >>> outputs = model(**inputs, labels=labels)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.tapas(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
  1320. """ TAPAS utilities."""
  1321. class AverageApproximationFunction(str, enum.Enum):
  1322. RATIO = "ratio"
  1323. FIRST_ORDER = "first_order"
  1324. SECOND_ORDER = "second_order"
  1325. # Beginning of everything related to segmented tensors
  1326. class IndexMap:
  1327. """Index grouping entries within a tensor."""
  1328. def __init__(self, indices, num_segments, batch_dims=0):
  1329. """
  1330. Creates an index
  1331. Args:
  1332. indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer):
  1333. Tensor containing the indices.
  1334. num_segments (`torch.LongTensor`):
  1335. Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same
  1336. number of segments (although many segments can be empty).
  1337. batch_dims (`int`, *optional*, defaults to 0):
  1338. The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as
  1339. batch dimensions. Segments in different batch elements are always distinct even if they have the same
  1340. index.
  1341. """
  1342. self.indices = torch.as_tensor(indices)
  1343. self.num_segments = torch.as_tensor(num_segments, device=indices.device)
  1344. self.batch_dims = batch_dims
  1345. def batch_shape(self):
  1346. return self.indices.size()[: self.batch_dims] # returns a torch.Size object
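
# Illustrative note (added for clarity, not part of the original TAPAS code): an `IndexMap` simply
# pairs a tensor of integer segment ids with the total number of segments. A minimal, hypothetical
# example grouping one sequence of five tokens into three column segments:
#
#     col_index = IndexMap(indices=torch.tensor([[0, 0, 1, 2, 2]]), num_segments=3, batch_dims=1)
#     col_index.batch_shape()  # torch.Size([1])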


class ProductIndexMap(IndexMap):
    """The product of two indices."""

    def __init__(self, outer_index, inner_index):
        """
        Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
        intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
        and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
        combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to
        *outer_index.num_segments* * *inner_index.num_segments*

        Args:
            outer_index (`IndexMap`):
                IndexMap.
            inner_index (`IndexMap`):
                IndexMap, must have the same shape as *outer_index*.
        """
        if outer_index.batch_dims != inner_index.batch_dims:
            raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.")

        super().__init__(
            indices=(inner_index.indices + outer_index.indices * inner_index.num_segments),
            num_segments=inner_index.num_segments * outer_index.num_segments,
            batch_dims=inner_index.batch_dims,
        )
        self.outer_index = outer_index
        self.inner_index = inner_index

    def project_outer(self, index):
        """Projects an index with the same index set onto the outer components."""
        indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode="floor").type(torch.long)
        return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims)

    def project_inner(self, index):
        """Projects an index with the same index set onto the inner components."""
        return IndexMap(
            indices=torch.fmod(index.indices, self.inner_index.num_segments)
            .type(torch.float)
            .floor()
            .type(torch.long),
            num_segments=self.inner_index.num_segments,
            batch_dims=index.batch_dims,
        )
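
# Illustrative note (added for clarity, not part of the original TAPAS code): combining a row index
# and a column index with `ProductIndexMap` yields one segment per (row, column) pair, i.e. per table
# cell. With hypothetical row ids [[0, 0, 1, 1]] (2 rows) and column ids [[0, 1, 0, 1]] (2 columns):
#
#     row_index = IndexMap(torch.tensor([[0, 0, 1, 1]]), num_segments=2, batch_dims=1)
#     col_index = IndexMap(torch.tensor([[0, 1, 0, 1]]), num_segments=2, batch_dims=1)
#     cell_index = ProductIndexMap(row_index, col_index)
#     cell_index.indices  # tensor([[0, 1, 2, 3]]), with num_segments == 4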


def gather(values, index, name="segmented_gather"):
    """
    Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up
    a value for that index in *values*. Two elements from the same segment always get assigned the same value.

    Args:
        values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)):
            Tensor with segment values.
        index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)):
            IndexMap.
        name (`str`, *optional*, defaults to 'segmented_gather'):
            Name for the operation. Currently not used

    Returns:
        `tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values.
    """
    indices = index.indices
    # first, check whether the indices of the index represent scalar values (i.e. not vectorized)
    if len(values.shape[index.batch_dims :]) < 2:
        return torch.gather(
            values,
            index.batch_dims,
            indices.view(
                values.size()[0], -1
            ),  # torch.gather expects index to have the same number of dimensions as values
        ).view(indices.size())
    else:
        # this means we have a vectorized version
        # we have to adjust the index
        indices = indices.unsqueeze(-1).expand(values.shape)
        return torch.gather(values, index.batch_dims, indices)
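
# Illustrative note (added for clarity, not part of the original TAPAS code): `gather` goes in the
# opposite direction of the reduce_* ops below; it broadcasts one value per segment back to every
# position of that segment. With the hypothetical index from the `IndexMap` note above
# (indices [[0, 0, 1, 2, 2]], num_segments=3):
#
#     per_segment = torch.tensor([[10.0, 20.0, 30.0]])
#     gather(per_segment, col_index)  # tensor([[10., 10., 20., 30., 30.]])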


def flatten(index, name="segmented_flatten"):
    """
    Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation
    relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by
    *num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the
    batch.

    Args:
        index (`IndexMap`):
            IndexMap to flatten.
        name (`str`, *optional*, defaults to 'segmented_flatten'):
            Name for the operation. Currently not used

    Returns:
        (`IndexMap`): The flattened IndexMap.
    """
    # first, get batch_size as scalar tensor
    batch_size = torch.prod(torch.tensor(list(index.batch_shape())))
    # next, create offset as 1-D tensor of length batch_size,
    # and multiply element-wise by num segments (to offset different elements in the batch) e.g. if batch size is 2: [0, 64]
    offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments
    offset = offset.view(index.batch_shape())
    for _ in range(index.batch_dims, len(index.indices.size())):  # typically range(1,2)
        offset = offset.unsqueeze(-1)

    indices = offset + index.indices
    return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)
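
# Illustrative note (added for clarity, not part of the original TAPAS code): `flatten` makes segment
# ids globally unique across the batch by offsetting batch element k (0-indexed) by k * num_segments.
# For hypothetical indices [[0, 1, 1], [0, 0, 2]] with num_segments=3, the flattened index becomes
# [0, 1, 1, 3, 3, 5] with num_segments == 6.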


def range_index_map(batch_shape, num_segments, name="range_index_map"):
    """
    Constructs an index map equal to range(num_segments).

    Args:
        batch_shape (`torch.Size`):
            Batch shape
        num_segments (`int`):
            Number of segments
        name (`str`, *optional*, defaults to 'range_index_map'):
            Name for the operation. Currently not used

    Returns:
        (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
    """
    batch_shape = torch.as_tensor(
        batch_shape, dtype=torch.long
    )  # create a rank 1 tensor vector containing batch_shape (e.g. [2])
    assert len(batch_shape.size()) == 1
    num_segments = torch.as_tensor(num_segments)  # create a rank 0 tensor (scalar) containing num_segments (e.g. 64)
    assert len(num_segments.size()) == 0

    indices = torch.arange(
        start=0, end=num_segments, device=num_segments.device
    )  # create a rank 1 vector with num_segments elements
    new_tensor = torch.cat(
        [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)],
        dim=0,
    )
    # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension)
    new_shape = [int(x) for x in new_tensor.tolist()]
    indices = indices.view(new_shape)

    multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0)
    indices = indices.repeat(multiples.tolist())
    # equivalent (in Numpy:)
    # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist()))

    return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0])


def _segment_reduce(values, index, segment_reduce_fn, name):
    """
    Applies a segment reduction segment-wise.

    Args:
        values (`torch.Tensor`):
            Tensor with segment values.
        index (`IndexMap`):
            IndexMap.
        segment_reduce_fn (`str`):
            Name for the reduce operation. One of "sum", "mean", "amax" or "amin".
        name (`str`):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor`): Tensor containing the reduced values per segment. output_index (`IndexMap`):
        IndexMap of shape batch_shape with elements equal to range(num_segments).
    """
    # Flatten the batch dimensions, as segments ops (scatter) do not support batching.
    # However if `values` has extra dimensions to the right keep them
    # unflattened. Segmented ops support vector-valued operations.
    flat_index = flatten(index)
    vector_shape = values.size()[len(index.indices.size()) :]  # torch.Size object
    flattened_shape = torch.cat(
        [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0
    )
    # changed "view" by "reshape" in the following line
    flat_values = values.reshape(flattened_shape.tolist())

    out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device)
    segment_means = out.scatter_reduce(
        dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False
    )

    # Unflatten the values.
    new_shape = torch.cat(
        [
            torch.as_tensor(index.batch_shape(), dtype=torch.long),
            torch.as_tensor([index.num_segments], dtype=torch.long),
            torch.as_tensor(vector_shape, dtype=torch.long),
        ],
        dim=0,
    )

    output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype)
    output_index = range_index_map(index.batch_shape(), index.num_segments)
    return output_values, output_index


def reduce_sum(values, index, name="segmented_reduce_sum"):
    """
    Sums a tensor over its segments.

    Outputs 0 for empty segments.

    This operation computes the sum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of
          vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the sum must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_sum'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "sum", name)


def reduce_mean(values, index, name="segmented_reduce_mean"):
    """
    Averages a tensor over its segments.

    Outputs 0 for empty segments.

    This operation computes the mean over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of
          vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the mean must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_mean'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "mean", name)
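
# Illustrative note (added for clarity, not part of the original TAPAS code): a minimal, hypothetical
# use of the segmented mean, averaging per-token values over the three segments of the `IndexMap`
# example above:
#
#     index = IndexMap(torch.tensor([[0, 0, 1, 2, 2]]), num_segments=3, batch_dims=1)
#     values = torch.tensor([[1.0, 3.0, 5.0, 2.0, 4.0]])
#     means, out_index = reduce_mean(values, index)
#     # means == tensor([[2., 5., 3.]]); out_index.indices == tensor([[0, 1, 2]])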


def reduce_max(values, index, name="segmented_reduce_max"):
    """
    Computes the maximum over segments.

    This operation computes the maximum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise
          maximum of vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the max must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_max'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "amax", name)


def reduce_min(values, index, name="segmented_reduce_min"):
    """
    Computes the minimum over segments.

    This operation computes the minimum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise
          minimum of vectors rather than scalars.

    Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the min must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_min'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "amin", name)


# End of everything related to segmented tensors


def compute_column_logits(
    sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection
):
    """
    Computes the column logits.

    Args:
        sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
        column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`):
            Weights of the linear layer for column selection.
        column_output_bias (`torch.FloatTensor` of shape `()`):
            Bias of the linear layer for column selection.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).
        allow_empty_column_selection (`bool`):
            Whether to allow the model to not select any column.

    Returns:
        column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`): Tensor containing the column logits
        for every example in the batch.
    """
    # First, compute the token logits (batch_size, seq_len) - without temperature
    token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias

    # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows)
    cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)

    # Finally, average the logits per column (batch_size, max_num_cols)
    column_index = cell_index.project_inner(cell_logits_index)
    column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)

    cell_count, _ = reduce_sum(cell_mask, column_index)
    column_logits /= cell_count + EPSILON_ZERO_DIVISION

    # Mask columns that do not appear in the example.
    is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0))
    column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
        is_padding, dtype=torch.float32, device=is_padding.device
    )

    if not allow_empty_column_selection:
        column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
            torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device
        )

    return column_logits
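
# Illustrative note (added for clarity, not part of the original TAPAS code): the shape flow above is
# token logits (batch_size, seq_length) -> per-cell means (batch_size, max_num_rows * max_num_cols)
# -> per-column averages (batch_size, max_num_cols). Padded columns and, optionally, the special
# "select nothing" column id 0 are pushed to a very low logit (probability ~ 0) via
# CLOSE_ENOUGH_TO_LOG_ZERO.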


def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):
    """
    Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The
    model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside
    the selected column are never selected.

    Args:
        token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the logits per token.
        column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`):
            Tensor containing the logits per column.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        col_index (`IndexMap`):
            Index that groups tokens into columns.
        cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).

    Returns:
        selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example. logits
        (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select
        cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to
        a very low value (such that the probabilities are 0).
    """
    # Part 1: column loss

    # First find the column we should select. We use the column with maximum number of selected cells.
    labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index)
    # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example
    column_label = torch.argmax(labels_per_column, dim=-1)  # shape (batch_size,)
    # Check if there are no selected cells in the column. In that case the model
    # should predict the special column id 0, which means "select nothing".
    no_cell_selected = torch.eq(
        torch.max(labels_per_column, dim=-1)[0], 0
    )  # no_cell_selected is of shape (batch_size,) and equals True
    # if an example of the batch has no cells selected (i.e. if there are no labels set to 1 for that example)
    column_label = torch.where(
        no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label
    )

    column_dist = torch.distributions.Categorical(logits=column_logits)  # shape (batch_size, max_num_cols)
    column_loss_per_example = -column_dist.log_prob(column_label)

    # Part 2: cell loss

    # Reduce the labels and logits to per-cell from per-token.
    # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32)
    logits_per_cell, _ = reduce_mean(token_logits, cell_index)
    # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0)
    labels_per_cell, labels_index = reduce_max(
        torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index
    )

    # Mask for the selected column.
    # column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs
    column_id_for_cells = cell_index.project_inner(labels_index).indices
    # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected
    column_mask = torch.as_tensor(
        torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)),
        dtype=torch.float32,
        device=cell_mask.device,
    )

    # Compute the log-likelihood for cells, but only for the selected column.
    cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell)  # shape (batch_size, 64*32)
    cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32))  # shape(batch_size, 64*32)

    cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1)

    # We need to normalize the loss by the number of cells in the column.
    cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION

    selection_loss_per_example = column_loss_per_example
    selection_loss_per_example += torch.where(
        no_cell_selected.view(selection_loss_per_example.size()),
        torch.zeros_like(selection_loss_per_example),
        cell_loss,
    )

    # Set the probs outside the selected column (selected by the *model*)
    # to 0. This ensures backwards compatibility with models that select
    # cells from multiple columns.
    selected_column_id = torch.as_tensor(
        torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device
    )  # shape (batch_size,)

    # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model
    selected_column_mask = torch.as_tensor(
        torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)),
        dtype=torch.float32,
        device=selected_column_id.device,
    )

    # Never select cells with the special column id 0.
    selected_column_mask = torch.where(
        torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()),
        torch.zeros_like(selected_column_mask),
        selected_column_mask,
    )
    new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)
    logits = gather(new_logits_per_cell, cell_index)

    return selection_loss_per_example, logits


def compute_token_logits(sequence_output, temperature, output_weights, output_bias):
    """
    Computes logits per token

    Args:
        sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
        temperature (`float`):
            Temperature for the Bernoulli distribution.
        output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):
            Weights of the linear layer for cell selection.
        output_bias (`torch.FloatTensor` of shape `()`):
            Bias of the linear layer for cell selection

    Returns:
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token.
    """
    logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature

    return logits


def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier):
    """
    Finds examples where the model should select cells with no aggregation.

    Returns a mask that determines for which examples the model should select answers directly from the table, without
    any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only
    apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation
    case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the
    aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold
    for this is a hyperparameter *cell_selection_preference*.

    Args:
        answer (`torch.FloatTensor` of shape `(batch_size, )`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Output of the pooler (BertPooler) on top of the encoder layer.
        cell_selection_preference (`float`):
            Preference for cell selection in ambiguous cases.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        aggregation_classifier (`torch.nn.Linear`):
            Aggregation head.

    Returns:
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use
        aggregation functions.
    """
    # torch.FloatTensor(batch_size,)
    aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device)
    logits_aggregation = aggregation_classifier(pooled_output)
    dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
    # Index 0 corresponds to "no aggregation".
    aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)

    # Cell selection examples according to current model.
    is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference

    # Examples with non-empty cell selection supervision.
    is_cell_supervision_available = torch.sum(labels, dim=1) > 0

    # torch.where is not equivalent to tf.where (in tensorflow 1)
    # hence the added .view on the condition to match the shape of the first tensor
    aggregate_mask = torch.where(
        torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()),
        torch.zeros_like(aggregate_mask_init, dtype=torch.float32),
        aggregate_mask_init,
    )

    aggregate_mask = aggregate_mask.detach()

    return aggregate_mask
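
# Illustrative note (added for clarity, not part of the original TAPAS code): the decision rule above,
# on a single hypothetical example: if the float answer is NaN (plain cell selection), the mask is 0.
# If it is a number, the mask starts at 1 and is reset to 0 only when cell supervision exists *and*
# the probability mass the model assigns to the aggregation operators (indices >= 1) is at most
# `cell_selection_preference`, e.g. an operator mass of 0.1 with a preference of 0.5 keeps the example
# as cell selection (mask 0).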


def _calculate_aggregation_loss_known(
    logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
):
    """
    Calculates aggregation loss when its type is known during training.

    In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation"
    should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting
    where aggregation type is always known, standard cross entropy loss is accumulated for all examples

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
            Aggregation function id for every example in the batch.
        use_answer_as_supervision (`bool`, *optional*):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (`int`, *optional*, defaults to 0):
            The number of aggregation operators to predict.

    Returns:
        aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is known
        during training) per example.
    """
    if use_answer_as_supervision:
        # Prepare "no aggregation" targets for cell selection examples.
        target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long)
    else:
        # Use aggregation supervision as the target.
        target_aggregation = aggregation_labels

    one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32)
    log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1)

    # torch.FloatTensor[batch_size]
    per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1)
    if use_answer_as_supervision:
        # Accumulate loss only for examples requiring cell selection
        # (no aggregation).
        return per_example_aggregation_intermediate * (1 - aggregate_mask)
    else:
        return per_example_aggregation_intermediate


def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
    """
    Calculates aggregation loss in the case of answer supervision.

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions

    Returns:
        aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in case of answer
        supervision) per example.
    """
    dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
    # Index 0 corresponds to "no aggregation".
    aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
    # Predict some aggregation in case of an answer that needs aggregation.
    # This increases the probability of all aggregation functions, in a way
    # similar to MML, but without considering whether the function gives the
    # correct answer.
    return -torch.log(aggregation_ops_total_mass) * aggregate_mask


def _calculate_aggregation_loss(
    logits_aggregation,
    aggregate_mask,
    aggregation_labels,
    use_answer_as_supervision,
    num_aggregation_labels,
    aggregation_loss_weight,
):
    """
    Calculates the aggregation loss per example.

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
            Aggregation function id for every example in the batch.
        use_answer_as_supervision (`bool`, *optional*):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (`int`, *optional*, defaults to 0):
            The number of aggregation operators to predict.
        aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
            Importance weight for the aggregation loss.

    Returns:
        aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example.
    """
    per_example_aggregation_loss = _calculate_aggregation_loss_known(
        logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
    )

    if use_answer_as_supervision:
        # Add aggregation loss for numeric answers that need aggregation.
        per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)
    return aggregation_loss_weight * per_example_aggregation_loss


def _calculate_expected_result(
    dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
):
    """
    Calculates the expected result given cell and aggregation probabilities.

    Args:
        dist_per_cell (`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the hyperparameters of the model

    Returns:
        expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.
    """
    if config.use_gumbel_for_cells:
        gumbel_dist = torch.distributions.RelaxedBernoulli(
            # The token logits were already divided by the temperature and used for
            # computing cell selection errors so we need to multiply it again here
            temperature=config.temperature,
            logits=dist_per_cell.logits * config.temperature,
        )
        scaled_probability_per_cell = gumbel_dist.sample()
    else:
        scaled_probability_per_cell = dist_per_cell.probs

    # <float32>[batch_size, seq_length]
    scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
    count_result = torch.sum(scaled_probability_per_cell, dim=1)
    numeric_values_masked = torch.where(
        torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values
    )  # Mask non-numeric table values to zero.
    sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)
    avg_approximation = config.average_approximation_function
    if avg_approximation == AverageApproximationFunction.RATIO:
        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
        # The sum of all probabilities except those that correspond to other cells
        # Ex here stands for expectation, more explicitly the expectation of the sum of N-1 Bernoulli random variables plus
        # the constant 1, which is computed as adding all N expected values and subtracting the extra one. It corresponds to X_c
        # in Appendix D of the original TAPAS paper which is trying to approximate the average of a random set.
        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1)
    elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
        # The sum of all probabilities except those that correspond to other cells
        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
        pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
        var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var
        multiplier = (var / torch.square(ex) + 1) / ex
        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1)
    else:
        raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}")

    if config.use_gumbel_for_aggregation:
        gumbel_dist = torch.distributions.RelaxedOneHotCategorical(
            config.aggregation_temperature, logits=logits_aggregation[:, 1:]
        )
        # <float32>[batch_size, num_aggregation_labels - 1]
        aggregation_op_only_probs = gumbel_dist.sample()
    else:
        # <float32>[batch_size, num_aggregation_labels - 1]
        aggregation_op_only_probs = nn.functional.softmax(
            logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1
        )

    all_results = torch.cat(
        [
            torch.unsqueeze(sum_result, dim=1),
            torch.unsqueeze(average_result, dim=1),
            torch.unsqueeze(count_result, dim=1),
        ],
        dim=1,
    )

    expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1)
    return expected_result
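
# Illustrative note (added for clarity, not part of the original TAPAS code): with p_c the scaled
# selection probability and v_c the numeric value of cell c, the quantities combined above are roughly
#     SUM   ~ sum_c p_c * v_c
#     COUNT ~ sum_c p_c
#     AVG   ~ SUM / COUNT                                (the "ratio" approximation), or
#     AVG   ~ sum_c v_c * p_c / (1 + sum_j p_j - p_c)    (the "first_order" approximation),
# and the final expected result is their mixture weighted by the aggregation operator probabilities
# (excluding the "no aggregation" operator at index 0).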


# PyTorch does not currently support Huber loss with custom delta so we define it ourselves
def huber_loss(input, target, delta: float = 1.0):
    errors = torch.abs(input - target)  # shape (batch_size,)
    return torch.where(errors < delta, 0.5 * errors**2, errors * delta - (0.5 * delta**2))
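
# Illustrative note (added for clarity, not part of the original TAPAS code): with the default
# delta=1.0, an absolute error of 0.5 gives 0.5 * 0.5**2 = 0.125 (quadratic branch), while an error of
# 3.0 gives 3.0 * 1.0 - 0.5 = 2.5 (linear branch), so large regression errors grow linearly rather
# than quadratically.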


def _calculate_regression_loss(
    answer,
    aggregate_mask,
    dist_per_cell,
    numeric_values,
    numeric_values_scale,
    input_mask_float,
    logits_aggregation,
    config,
):
    """
    Calculates the regression loss per example.

    Args:
        answer (`torch.FloatTensor` of shape `(batch_size,)`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`):
            A mask set to 1 for examples that should use aggregation functions.
        dist_per_cell (`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the parameters of the model

    Returns:
        per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scaled answer loss for each
        example in the batch. large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is 1
        for examples whose answer loss does not exceed the answer_loss_cutoff (and 0 otherwise).
    """
    # float32 (batch_size,)
    expected_result = _calculate_expected_result(
        dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
    )

    # float32 (batch_size,)
    answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer)

    if config.use_normalized_answer_loss:
        normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach()

        normalized_answer_masked = answer_masked / normalizer
        normalized_expected_result = expected_result / normalizer
        per_example_answer_loss = huber_loss(
            normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask
        )
    else:
        per_example_answer_loss = huber_loss(
            expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta
        )

    if config.answer_loss_cutoff is None:
        large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32)
    else:
        large_answer_loss_mask = torch.where(
            per_example_answer_loss > config.answer_loss_cutoff,
            torch.zeros_like(per_example_answer_loss, dtype=torch.float32),
            torch.ones_like(per_example_answer_loss, dtype=torch.float32),
        )
    per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask)

    return per_example_answer_loss_scaled, large_answer_loss_mask