tokenization_tapas.py 116 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780278127822783278427852786278727882789
  1. # coding=utf-8
  2. # Copyright 2020 Google Research and The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """Tokenization class for TAPAS model."""
  16. import collections
  17. import datetime
  18. import enum
  19. import itertools
  20. import math
  21. import os
  22. import re
  23. import unicodedata
  24. from dataclasses import dataclass
  25. from typing import Callable, Dict, Generator, List, Optional, Tuple, Union
  26. import numpy as np
  27. from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
  28. from ...tokenization_utils_base import (
  29. ENCODE_KWARGS_DOCSTRING,
  30. VERY_LARGE_INTEGER,
  31. BatchEncoding,
  32. EncodedInput,
  33. PreTokenizedInput,
  34. TextInput,
  35. )
  36. from ...utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available, logging
  37. if is_pandas_available():
  38. import pandas as pd
  39. logger = logging.get_logger(__name__)
  40. VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
  41. class TapasTruncationStrategy(ExplicitEnum):
  42. """
  43. Possible values for the `truncation` argument in [`~TapasTokenizer.__call__`]. Useful for tab-completion in an IDE.
  44. """
  45. DROP_ROWS_TO_FIT = "drop_rows_to_fit"
  46. DO_NOT_TRUNCATE = "do_not_truncate"
  47. TableValue = collections.namedtuple("TokenValue", ["token", "column_id", "row_id"])
  48. @dataclass(frozen=True)
  49. class TokenCoordinates:
  50. column_index: int
  51. row_index: int
  52. token_index: int
  53. @dataclass
  54. class TokenizedTable:
  55. rows: List[List[List[str]]]
  56. selected_tokens: List[TokenCoordinates]
  57. @dataclass(frozen=True)
  58. class SerializedExample:
  59. tokens: List[str]
  60. column_ids: List[int]
  61. row_ids: List[int]
  62. segment_ids: List[int]
  63. def _is_inner_wordpiece(token: str):
  64. return token.startswith("##")
  65. def load_vocab(vocab_file):
  66. """Loads a vocabulary file into a dictionary."""
  67. vocab = collections.OrderedDict()
  68. with open(vocab_file, "r", encoding="utf-8") as reader:
  69. tokens = reader.readlines()
  70. for index, token in enumerate(tokens):
  71. token = token.rstrip("\n")
  72. vocab[token] = index
  73. return vocab
  74. def whitespace_tokenize(text):
  75. """Runs basic whitespace cleaning and splitting on a piece of text."""
  76. text = text.strip()
  77. if not text:
  78. return []
  79. tokens = text.split()
  80. return tokens
  81. TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
  82. add_special_tokens (`bool`, *optional*, defaults to `True`):
  83. Whether or not to encode the sequences with the special tokens relative to their model.
  84. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
  85. Activates and controls padding. Accepts the following values:
  86. - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
  87. sequence if provided).
  88. - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
  89. acceptable input length for the model if that argument is not provided.
  90. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
  91. lengths).
  92. truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
  93. Activates and controls truncation. Accepts the following values:
  94. - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
  95. or to the maximum acceptable input length for the model if that argument is not provided. This will
  96. truncate row by row, removing rows from the table.
  97. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
  98. greater than the model maximum admissible input size).
  99. max_length (`int`, *optional*):
  100. Controls the maximum length to use by one of the truncation/padding parameters.
  101. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
  102. is required by one of the truncation/padding parameters. If the model has no specific maximum input
  103. length (like XLNet) truncation/padding to a maximum length will be deactivated.
  104. is_split_into_words (`bool`, *optional*, defaults to `False`):
  105. Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
  106. tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
  107. which it will tokenize. This is useful for NER or token classification.
  108. pad_to_multiple_of (`int`, *optional*):
  109. If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
  110. the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
  111. return_tensors (`str` or [`~utils.TensorType`], *optional*):
  112. If set, will return tensors instead of list of python integers. Acceptable values are:
  113. - `'tf'`: Return TensorFlow `tf.constant` objects.
  114. - `'pt'`: Return PyTorch `torch.Tensor` objects.
  115. - `'np'`: Return Numpy `np.ndarray` objects.
  116. """
  117. class TapasTokenizer(PreTrainedTokenizer):
  118. r"""
  119. Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by
  120. TAPAS models.
  121. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
  122. this superclass for more information regarding those methods. [`TapasTokenizer`] creates several token type ids to
  123. encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `segment_ids`,
  124. `column_ids`, `row_ids`, `prev_labels`, `column_ranks`, `inv_column_ranks` and `numeric_relations`:
  125. - segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and
  126. padding.
  127. - column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question
  128. tokens, special tokens and padding.
  129. - row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens,
  130. special tokens and padding. Tokens of column headers are also 0.
  131. - prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in
  132. a conversational setup (such as SQA).
  133. - column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a
  134. column "number of movies" with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2
  135. respectively. 0 for all question tokens, special tokens and padding.
  136. - inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if
  137. you have a column "number of movies" with values 87, 53 and 69, then the inverse column ranks of these tokens are
  138. 1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding.
  139. - numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all
  140. question tokens, special tokens and padding.
  141. [`TapasTokenizer`] runs end-to-end tokenization on a table and associated sentences: punctuation splitting and
  142. wordpiece.
  143. Args:
  144. vocab_file (`str`):
  145. File containing the vocabulary.
  146. do_lower_case (`bool`, *optional*, defaults to `True`):
  147. Whether or not to lowercase the input when tokenizing.
  148. do_basic_tokenize (`bool`, *optional*, defaults to `True`):
  149. Whether or not to do basic tokenization before WordPiece.
  150. never_split (`Iterable`, *optional*):
  151. Collection of tokens which will never be split during tokenization. Only has an effect when
  152. `do_basic_tokenize=True`
  153. unk_token (`str`, *optional*, defaults to `"[UNK]"`):
  154. The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
  155. token instead.
  156. sep_token (`str`, *optional*, defaults to `"[SEP]"`):
  157. The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
  158. sequence classification or for a text and a question for question answering. It is also used as the last
  159. token of a sequence built with special tokens.
  160. pad_token (`str`, *optional*, defaults to `"[PAD]"`):
  161. The token used for padding, for example when batching sequences of different lengths.
  162. cls_token (`str`, *optional*, defaults to `"[CLS]"`):
  163. The classifier token which is used when doing sequence classification (classification of the whole sequence
  164. instead of per-token classification). It is the first token of the sequence when built with special tokens.
  165. mask_token (`str`, *optional*, defaults to `"[MASK]"`):
  166. The token used for masking values. This is the token used when training this model with masked language
  167. modeling. This is the token which the model will try to predict.
  168. empty_token (`str`, *optional*, defaults to `"[EMPTY]"`):
  169. The token used for empty cell values in a table. Empty cell values include "", "n/a", "nan" and "?".
  170. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
  171. Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
  172. [issue](https://github.com/huggingface/transformers/issues/328)).
  173. strip_accents (`bool`, *optional*):
  174. Whether or not to strip all accents. If this option is not specified, then it will be determined by the
  175. value for `lowercase` (as in the original BERT).
  176. cell_trim_length (`int`, *optional*, defaults to -1):
  177. If > 0: Trim cells so that the length is <= this value. Also disables further cell trimming, should thus be
  178. used with `truncation` set to `True`.
  179. max_column_id (`int`, *optional*):
  180. Max column id to extract.
  181. max_row_id (`int`, *optional*):
  182. Max row id to extract.
  183. strip_column_names (`bool`, *optional*, defaults to `False`):
  184. Whether to add empty strings instead of column names.
  185. update_answer_coordinates (`bool`, *optional*, defaults to `False`):
  186. Whether to recompute the answer coordinates from the answer text.
  187. min_question_length (`int`, *optional*):
  188. Minimum length of each question in terms of tokens (will be skipped otherwise).
  189. max_question_length (`int`, *optional*):
  190. Maximum length of each question in terms of tokens (will be skipped otherwise).
  191. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
  192. Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
  193. extra spaces.
  194. """
  195. vocab_files_names = VOCAB_FILES_NAMES
  196. def __init__(
  197. self,
  198. vocab_file,
  199. do_lower_case=True,
  200. do_basic_tokenize=True,
  201. never_split=None,
  202. unk_token="[UNK]",
  203. sep_token="[SEP]",
  204. pad_token="[PAD]",
  205. cls_token="[CLS]",
  206. mask_token="[MASK]",
  207. empty_token="[EMPTY]",
  208. tokenize_chinese_chars=True,
  209. strip_accents=None,
  210. cell_trim_length: int = -1,
  211. max_column_id: int = None,
  212. max_row_id: int = None,
  213. strip_column_names: bool = False,
  214. update_answer_coordinates: bool = False,
  215. min_question_length=None,
  216. max_question_length=None,
  217. model_max_length: int = 512,
  218. additional_special_tokens: Optional[List[str]] = None,
  219. clean_up_tokenization_spaces=True,
  220. **kwargs,
  221. ):
  222. if not is_pandas_available():
  223. raise ImportError("Pandas is required for the TAPAS tokenizer.")
  224. if additional_special_tokens is not None:
  225. if empty_token not in additional_special_tokens:
  226. additional_special_tokens.append(empty_token)
  227. else:
  228. additional_special_tokens = [empty_token]
  229. if not os.path.isfile(vocab_file):
  230. raise ValueError(
  231. f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
  232. " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
  233. )
  234. self.vocab = load_vocab(vocab_file)
  235. self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
  236. self.do_basic_tokenize = do_basic_tokenize
  237. if do_basic_tokenize:
  238. self.basic_tokenizer = BasicTokenizer(
  239. do_lower_case=do_lower_case,
  240. never_split=never_split,
  241. tokenize_chinese_chars=tokenize_chinese_chars,
  242. strip_accents=strip_accents,
  243. )
  244. self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
  245. # Additional properties
  246. self.cell_trim_length = cell_trim_length
  247. self.max_column_id = (
  248. max_column_id
  249. if max_column_id is not None
  250. else model_max_length
  251. if model_max_length is not None
  252. else VERY_LARGE_INTEGER
  253. )
  254. self.max_row_id = (
  255. max_row_id
  256. if max_row_id is not None
  257. else model_max_length
  258. if model_max_length is not None
  259. else VERY_LARGE_INTEGER
  260. )
  261. self.strip_column_names = strip_column_names
  262. self.update_answer_coordinates = update_answer_coordinates
  263. self.min_question_length = min_question_length
  264. self.max_question_length = max_question_length
  265. super().__init__(
  266. do_lower_case=do_lower_case,
  267. do_basic_tokenize=do_basic_tokenize,
  268. never_split=never_split,
  269. unk_token=unk_token,
  270. sep_token=sep_token,
  271. pad_token=pad_token,
  272. cls_token=cls_token,
  273. mask_token=mask_token,
  274. empty_token=empty_token,
  275. tokenize_chinese_chars=tokenize_chinese_chars,
  276. strip_accents=strip_accents,
  277. cell_trim_length=cell_trim_length,
  278. max_column_id=max_column_id,
  279. max_row_id=max_row_id,
  280. strip_column_names=strip_column_names,
  281. update_answer_coordinates=update_answer_coordinates,
  282. min_question_length=min_question_length,
  283. max_question_length=max_question_length,
  284. model_max_length=model_max_length,
  285. additional_special_tokens=additional_special_tokens,
  286. clean_up_tokenization_spaces=clean_up_tokenization_spaces,
  287. **kwargs,
  288. )
  289. @property
  290. def do_lower_case(self):
  291. return self.basic_tokenizer.do_lower_case
  292. @property
  293. def vocab_size(self):
  294. return len(self.vocab)
  295. def get_vocab(self):
  296. return dict(self.vocab, **self.added_tokens_encoder)
  297. def _tokenize(self, text):
  298. if format_text(text) == EMPTY_TEXT:
  299. return [self.additional_special_tokens[0]]
  300. split_tokens = []
  301. if self.do_basic_tokenize:
  302. for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
  303. # If the token is part of the never_split set
  304. if token in self.basic_tokenizer.never_split:
  305. split_tokens.append(token)
  306. else:
  307. split_tokens += self.wordpiece_tokenizer.tokenize(token)
  308. else:
  309. split_tokens = self.wordpiece_tokenizer.tokenize(text)
  310. return split_tokens
  311. def _convert_token_to_id(self, token):
  312. """Converts a token (str) in an id using the vocab."""
  313. return self.vocab.get(token, self.vocab.get(self.unk_token))
  314. def _convert_id_to_token(self, index):
  315. """Converts an index (integer) in a token (str) using the vocab."""
  316. return self.ids_to_tokens.get(index, self.unk_token)
  317. def convert_tokens_to_string(self, tokens):
  318. """Converts a sequence of tokens (string) in a single string."""
  319. out_string = " ".join(tokens).replace(" ##", "").strip()
  320. return out_string
  321. def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
  322. index = 0
  323. if os.path.isdir(save_directory):
  324. vocab_file = os.path.join(
  325. save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
  326. )
  327. else:
  328. vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
  329. with open(vocab_file, "w", encoding="utf-8") as writer:
  330. for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
  331. if index != token_index:
  332. logger.warning(
  333. f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
  334. " Please check that the vocabulary is not corrupted!"
  335. )
  336. index = token_index
  337. writer.write(token + "\n")
  338. index += 1
  339. return (vocab_file,)
  340. def create_attention_mask_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:
  341. """
  342. Creates the attention mask according to the query token IDs and a list of table values.
  343. Args:
  344. query_ids (`List[int]`): list of token IDs corresponding to the ID.
  345. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the
  346. token value, the column ID and the row ID of said token.
  347. Returns:
  348. `List[int]`: List of ints containing the attention mask values.
  349. """
  350. return [1] * (1 + len(query_ids) + 1 + len(table_values))
  351. def create_segment_token_type_ids_from_sequences(
  352. self, query_ids: List[int], table_values: List[TableValue]
  353. ) -> List[int]:
  354. """
  355. Creates the segment token type IDs according to the query token IDs and a list of table values.
  356. Args:
  357. query_ids (`List[int]`): list of token IDs corresponding to the ID.
  358. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the
  359. token value, the column ID and the row ID of said token.
  360. Returns:
  361. `List[int]`: List of ints containing the segment token type IDs values.
  362. """
  363. table_ids = list(zip(*table_values))[0] if table_values else []
  364. return [0] * (1 + len(query_ids) + 1) + [1] * len(table_ids)
  365. def create_column_token_type_ids_from_sequences(
  366. self, query_ids: List[int], table_values: List[TableValue]
  367. ) -> List[int]:
  368. """
  369. Creates the column token type IDs according to the query token IDs and a list of table values.
  370. Args:
  371. query_ids (`List[int]`): list of token IDs corresponding to the ID.
  372. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the
  373. token value, the column ID and the row ID of said token.
  374. Returns:
  375. `List[int]`: List of ints containing the column token type IDs values.
  376. """
  377. table_column_ids = list(zip(*table_values))[1] if table_values else []
  378. return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)
  379. def create_row_token_type_ids_from_sequences(
  380. self, query_ids: List[int], table_values: List[TableValue]
  381. ) -> List[int]:
  382. """
  383. Creates the row token type IDs according to the query token IDs and a list of table values.
  384. Args:
  385. query_ids (`List[int]`): list of token IDs corresponding to the ID.
  386. table_values (`List[TableValue]`): lift of table values, which are named tuples containing the
  387. token value, the column ID and the row ID of said token.
  388. Returns:
  389. `List[int]`: List of ints containing the row token type IDs values.
  390. """
  391. table_row_ids = list(zip(*table_values))[2] if table_values else []
  392. return [0] * (1 + len(query_ids) + 1) + list(table_row_ids)
  393. def build_inputs_with_special_tokens(
  394. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
  395. ) -> List[int]:
  396. """
  397. Build model inputs from a question and flattened table for question answering or sequence classification tasks
  398. by concatenating and adding special tokens.
  399. Args:
  400. token_ids_0 (`List[int]`): The ids of the question.
  401. token_ids_1 (`List[int]`, *optional*): The ids of the flattened table.
  402. Returns:
  403. `List[int]`: The model input with special tokens.
  404. """
  405. if token_ids_1 is None:
  406. raise ValueError("With TAPAS, you must provide both question IDs and table IDs.")
  407. return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1
  408. def get_special_tokens_mask(
  409. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
  410. ) -> List[int]:
  411. """
  412. Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
  413. special tokens using the tokenizer `prepare_for_model` method.
  414. Args:
  415. token_ids_0 (`List[int]`):
  416. List of question IDs.
  417. token_ids_1 (`List[int]`, *optional*):
  418. List of flattened table IDs.
  419. already_has_special_tokens (`bool`, *optional*, defaults to `False`):
  420. Whether or not the token list is already formatted with special tokens for the model.
  421. Returns:
  422. `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
  423. """
  424. if already_has_special_tokens:
  425. return super().get_special_tokens_mask(
  426. token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
  427. )
  428. if token_ids_1 is not None:
  429. return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
  430. return [1] + ([0] * len(token_ids_0)) + [1]
  431. @add_end_docstrings(TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  432. def __call__(
  433. self,
  434. table: "pd.DataFrame",
  435. queries: Optional[
  436. Union[
  437. TextInput,
  438. PreTokenizedInput,
  439. EncodedInput,
  440. List[TextInput],
  441. List[PreTokenizedInput],
  442. List[EncodedInput],
  443. ]
  444. ] = None,
  445. answer_coordinates: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
  446. answer_text: Optional[Union[List[TextInput], List[List[TextInput]]]] = None,
  447. add_special_tokens: bool = True,
  448. padding: Union[bool, str, PaddingStrategy] = False,
  449. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  450. max_length: Optional[int] = None,
  451. pad_to_multiple_of: Optional[int] = None,
  452. padding_side: Optional[bool] = None,
  453. return_tensors: Optional[Union[str, TensorType]] = None,
  454. return_token_type_ids: Optional[bool] = None,
  455. return_attention_mask: Optional[bool] = None,
  456. return_overflowing_tokens: bool = False,
  457. return_special_tokens_mask: bool = False,
  458. return_offsets_mapping: bool = False,
  459. return_length: bool = False,
  460. verbose: bool = True,
  461. **kwargs,
  462. ) -> BatchEncoding:
  463. """
  464. Main method to tokenize and prepare for the model one or several sequence(s) related to a table.
  465. Args:
  466. table (`pd.DataFrame`):
  467. Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
  468. dataframe to convert it to string.
  469. queries (`str` or `List[str]`):
  470. Question or batch of questions related to a table to be encoded. Note that in case of a batch, all
  471. questions must refer to the **same** table.
  472. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
  473. Answer coordinates of each table-question pair in the batch. In case only a single table-question pair
  474. is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must
  475. be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The
  476. first column has index 0. In case a batch of table-question pairs is provided, then the
  477. answer_coordinates must be a list of lists of tuples (each list corresponding to a single
  478. table-question pair).
  479. answer_text (`List[str]` or `List[List[str]]`, *optional*):
  480. Answer text of each table-question pair in the batch. In case only a single table-question pair is
  481. provided, then the answer_text must be a single list of one or more strings. Each string must be the
  482. answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided,
  483. then the answer_coordinates must be a list of lists of strings (each list corresponding to a single
  484. table-question pair).
  485. """
  486. assert isinstance(table, pd.DataFrame), "Table must be of type pd.DataFrame"
  487. # Input type checking for clearer error
  488. valid_query = False
  489. # Check that query has a valid type
  490. if queries is None or isinstance(queries, str):
  491. valid_query = True
  492. elif isinstance(queries, (list, tuple)):
  493. if len(queries) == 0 or isinstance(queries[0], str):
  494. valid_query = True
  495. if not valid_query:
  496. raise ValueError(
  497. "queries input must of type `str` (single example), `List[str]` (batch or single pretokenized"
  498. " example). "
  499. )
  500. is_batched = isinstance(queries, (list, tuple))
  501. if is_batched:
  502. return self.batch_encode_plus(
  503. table=table,
  504. queries=queries,
  505. answer_coordinates=answer_coordinates,
  506. answer_text=answer_text,
  507. add_special_tokens=add_special_tokens,
  508. padding=padding,
  509. truncation=truncation,
  510. max_length=max_length,
  511. pad_to_multiple_of=pad_to_multiple_of,
  512. padding_side=padding_side,
  513. return_tensors=return_tensors,
  514. return_token_type_ids=return_token_type_ids,
  515. return_attention_mask=return_attention_mask,
  516. return_overflowing_tokens=return_overflowing_tokens,
  517. return_special_tokens_mask=return_special_tokens_mask,
  518. return_offsets_mapping=return_offsets_mapping,
  519. return_length=return_length,
  520. verbose=verbose,
  521. **kwargs,
  522. )
  523. else:
  524. return self.encode_plus(
  525. table=table,
  526. query=queries,
  527. answer_coordinates=answer_coordinates,
  528. answer_text=answer_text,
  529. add_special_tokens=add_special_tokens,
  530. padding=padding,
  531. truncation=truncation,
  532. max_length=max_length,
  533. pad_to_multiple_of=pad_to_multiple_of,
  534. padding_side=padding_side,
  535. return_tensors=return_tensors,
  536. return_token_type_ids=return_token_type_ids,
  537. return_attention_mask=return_attention_mask,
  538. return_overflowing_tokens=return_overflowing_tokens,
  539. return_special_tokens_mask=return_special_tokens_mask,
  540. return_offsets_mapping=return_offsets_mapping,
  541. return_length=return_length,
  542. verbose=verbose,
  543. **kwargs,
  544. )
  545. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  546. def batch_encode_plus(
  547. self,
  548. table: "pd.DataFrame",
  549. queries: Optional[
  550. Union[
  551. List[TextInput],
  552. List[PreTokenizedInput],
  553. List[EncodedInput],
  554. ]
  555. ] = None,
  556. answer_coordinates: Optional[List[List[Tuple]]] = None,
  557. answer_text: Optional[List[List[TextInput]]] = None,
  558. add_special_tokens: bool = True,
  559. padding: Union[bool, str, PaddingStrategy] = False,
  560. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  561. max_length: Optional[int] = None,
  562. pad_to_multiple_of: Optional[int] = None,
  563. padding_side: Optional[bool] = None,
  564. return_tensors: Optional[Union[str, TensorType]] = None,
  565. return_token_type_ids: Optional[bool] = None,
  566. return_attention_mask: Optional[bool] = None,
  567. return_overflowing_tokens: bool = False,
  568. return_special_tokens_mask: bool = False,
  569. return_offsets_mapping: bool = False,
  570. return_length: bool = False,
  571. verbose: bool = True,
  572. **kwargs,
  573. ) -> BatchEncoding:
  574. """
  575. Prepare a table and a list of strings for the model.
  576. <Tip warning={true}>
  577. This method is deprecated, `__call__` should be used instead.
  578. </Tip>
  579. Args:
  580. table (`pd.DataFrame`):
  581. Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
  582. dataframe to convert it to string.
  583. queries (`List[str]`):
  584. Batch of questions related to a table to be encoded. Note that all questions must refer to the **same**
  585. table.
  586. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
  587. Answer coordinates of each table-question pair in the batch. Each tuple must be a (row_index,
  588. column_index) pair. The first data row (not the column header row) has index 0. The first column has
  589. index 0. The answer_coordinates must be a list of lists of tuples (each list corresponding to a single
  590. table-question pair).
  591. answer_text (`List[str]` or `List[List[str]]`, *optional*):
  592. Answer text of each table-question pair in the batch. In case a batch of table-question pairs is
  593. provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a
  594. single table-question pair). Each string must be the answer text of a corresponding answer coordinate.
  595. """
  596. if return_token_type_ids is not None and not add_special_tokens:
  597. raise ValueError(
  598. "Asking to return token_type_ids while setting add_special_tokens to False "
  599. "results in an undefined behavior. Please set add_special_tokens to True or "
  600. "set return_token_type_ids to None."
  601. )
  602. if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text):
  603. raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided")
  604. elif answer_coordinates is None and answer_text is None:
  605. answer_coordinates = answer_text = [None] * len(queries)
  606. if "is_split_into_words" in kwargs:
  607. raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.")
  608. if return_offsets_mapping:
  609. raise NotImplementedError(
  610. "return_offset_mapping is not available when using Python tokenizers. "
  611. "To use this feature, change your tokenizer to one deriving from "
  612. "transformers.PreTrainedTokenizerFast."
  613. )
  614. return self._batch_encode_plus(
  615. table=table,
  616. queries=queries,
  617. answer_coordinates=answer_coordinates,
  618. answer_text=answer_text,
  619. add_special_tokens=add_special_tokens,
  620. padding=padding,
  621. truncation=truncation,
  622. max_length=max_length,
  623. pad_to_multiple_of=pad_to_multiple_of,
  624. padding_side=padding_side,
  625. return_tensors=return_tensors,
  626. return_token_type_ids=return_token_type_ids,
  627. return_attention_mask=return_attention_mask,
  628. return_overflowing_tokens=return_overflowing_tokens,
  629. return_special_tokens_mask=return_special_tokens_mask,
  630. return_offsets_mapping=return_offsets_mapping,
  631. return_length=return_length,
  632. verbose=verbose,
  633. **kwargs,
  634. )
  635. def _get_question_tokens(self, query):
  636. """Tokenizes the query, taking into account the max and min question length."""
  637. query_tokens = self.tokenize(query)
  638. if self.max_question_length is not None and len(query_tokens) > self.max_question_length:
  639. logger.warning("Skipping query as its tokens are longer than the max question length")
  640. return "", []
  641. if self.min_question_length is not None and len(query_tokens) < self.min_question_length:
  642. logger.warning("Skipping query as its tokens are shorter than the min question length")
  643. return "", []
  644. return query, query_tokens
  645. def _batch_encode_plus(
  646. self,
  647. table,
  648. queries: Union[
  649. List[TextInput],
  650. List[PreTokenizedInput],
  651. List[EncodedInput],
  652. ],
  653. answer_coordinates: Optional[List[List[Tuple]]] = None,
  654. answer_text: Optional[List[List[TextInput]]] = None,
  655. add_special_tokens: bool = True,
  656. padding: Union[bool, str, PaddingStrategy] = False,
  657. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  658. max_length: Optional[int] = None,
  659. pad_to_multiple_of: Optional[int] = None,
  660. padding_side: Optional[bool] = None,
  661. return_tensors: Optional[Union[str, TensorType]] = None,
  662. return_token_type_ids: Optional[bool] = True,
  663. return_attention_mask: Optional[bool] = None,
  664. return_overflowing_tokens: bool = False,
  665. return_special_tokens_mask: bool = False,
  666. return_offsets_mapping: bool = False,
  667. return_length: bool = False,
  668. verbose: bool = True,
  669. **kwargs,
  670. ) -> BatchEncoding:
  671. table_tokens = self._tokenize_table(table)
  672. queries_tokens = []
  673. for idx, query in enumerate(queries):
  674. query, query_tokens = self._get_question_tokens(query)
  675. queries[idx] = query
  676. queries_tokens.append(query_tokens)
  677. batch_outputs = self._batch_prepare_for_model(
  678. table,
  679. queries,
  680. tokenized_table=table_tokens,
  681. queries_tokens=queries_tokens,
  682. answer_coordinates=answer_coordinates,
  683. padding=padding,
  684. truncation=truncation,
  685. answer_text=answer_text,
  686. add_special_tokens=add_special_tokens,
  687. max_length=max_length,
  688. pad_to_multiple_of=pad_to_multiple_of,
  689. padding_side=padding_side,
  690. return_tensors=return_tensors,
  691. prepend_batch_axis=True,
  692. return_attention_mask=return_attention_mask,
  693. return_token_type_ids=return_token_type_ids,
  694. return_overflowing_tokens=return_overflowing_tokens,
  695. return_special_tokens_mask=return_special_tokens_mask,
  696. return_length=return_length,
  697. verbose=verbose,
  698. )
  699. return BatchEncoding(batch_outputs)
  700. def _batch_prepare_for_model(
  701. self,
  702. raw_table: "pd.DataFrame",
  703. raw_queries: Union[
  704. List[TextInput],
  705. List[PreTokenizedInput],
  706. List[EncodedInput],
  707. ],
  708. tokenized_table: Optional[TokenizedTable] = None,
  709. queries_tokens: Optional[List[List[str]]] = None,
  710. answer_coordinates: Optional[List[List[Tuple]]] = None,
  711. answer_text: Optional[List[List[TextInput]]] = None,
  712. add_special_tokens: bool = True,
  713. padding: Union[bool, str, PaddingStrategy] = False,
  714. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  715. max_length: Optional[int] = None,
  716. pad_to_multiple_of: Optional[int] = None,
  717. padding_side: Optional[bool] = None,
  718. return_tensors: Optional[Union[str, TensorType]] = None,
  719. return_token_type_ids: Optional[bool] = True,
  720. return_attention_mask: Optional[bool] = True,
  721. return_special_tokens_mask: bool = False,
  722. return_offsets_mapping: bool = False,
  723. return_length: bool = False,
  724. verbose: bool = True,
  725. prepend_batch_axis: bool = False,
  726. **kwargs,
  727. ) -> BatchEncoding:
  728. batch_outputs = {}
  729. for index, example in enumerate(zip(raw_queries, queries_tokens, answer_coordinates, answer_text)):
  730. raw_query, query_tokens, answer_coords, answer_txt = example
  731. outputs = self.prepare_for_model(
  732. raw_table,
  733. raw_query,
  734. tokenized_table=tokenized_table,
  735. query_tokens=query_tokens,
  736. answer_coordinates=answer_coords,
  737. answer_text=answer_txt,
  738. add_special_tokens=add_special_tokens,
  739. padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
  740. truncation=truncation,
  741. max_length=max_length,
  742. pad_to_multiple_of=None, # we pad in batch afterwards
  743. padding_side=None, # we pad in batch afterward
  744. return_attention_mask=False, # we pad in batch afterwards
  745. return_token_type_ids=return_token_type_ids,
  746. return_special_tokens_mask=return_special_tokens_mask,
  747. return_length=return_length,
  748. return_tensors=None, # We convert the whole batch to tensors at the end
  749. prepend_batch_axis=False,
  750. verbose=verbose,
  751. prev_answer_coordinates=answer_coordinates[index - 1] if index != 0 else None,
  752. prev_answer_text=answer_text[index - 1] if index != 0 else None,
  753. )
  754. for key, value in outputs.items():
  755. if key not in batch_outputs:
  756. batch_outputs[key] = []
  757. batch_outputs[key].append(value)
  758. batch_outputs = self.pad(
  759. batch_outputs,
  760. padding=padding,
  761. max_length=max_length,
  762. pad_to_multiple_of=pad_to_multiple_of,
  763. padding_side=padding_side,
  764. return_attention_mask=return_attention_mask,
  765. )
  766. batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
  767. return batch_outputs
  768. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
  769. def encode(
  770. self,
  771. table: "pd.DataFrame",
  772. query: Optional[
  773. Union[
  774. TextInput,
  775. PreTokenizedInput,
  776. EncodedInput,
  777. ]
  778. ] = None,
  779. add_special_tokens: bool = True,
  780. padding: Union[bool, str, PaddingStrategy] = False,
  781. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  782. max_length: Optional[int] = None,
  783. return_tensors: Optional[Union[str, TensorType]] = None,
  784. **kwargs,
  785. ) -> List[int]:
  786. """
  787. Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc.
  788. which are necessary for the model to work correctly. Use that method if you want to build your processing on
  789. your own, otherwise refer to `__call__`.
  790. Args:
  791. table (`pd.DataFrame`):
  792. Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
  793. dataframe to convert it to string.
  794. query (`str` or `List[str]`):
  795. Question related to a table to be encoded.
  796. """
  797. encoded_inputs = self.encode_plus(
  798. table,
  799. query=query,
  800. add_special_tokens=add_special_tokens,
  801. padding=padding,
  802. truncation=truncation,
  803. max_length=max_length,
  804. return_tensors=return_tensors,
  805. **kwargs,
  806. )
  807. return encoded_inputs["input_ids"]
  808. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  809. def encode_plus(
  810. self,
  811. table: "pd.DataFrame",
  812. query: Optional[
  813. Union[
  814. TextInput,
  815. PreTokenizedInput,
  816. EncodedInput,
  817. ]
  818. ] = None,
  819. answer_coordinates: Optional[List[Tuple]] = None,
  820. answer_text: Optional[List[TextInput]] = None,
  821. add_special_tokens: bool = True,
  822. padding: Union[bool, str, PaddingStrategy] = False,
  823. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  824. max_length: Optional[int] = None,
  825. pad_to_multiple_of: Optional[int] = None,
  826. padding_side: Optional[bool] = None,
  827. return_tensors: Optional[Union[str, TensorType]] = None,
  828. return_token_type_ids: Optional[bool] = None,
  829. return_attention_mask: Optional[bool] = None,
  830. return_special_tokens_mask: bool = False,
  831. return_offsets_mapping: bool = False,
  832. return_length: bool = False,
  833. verbose: bool = True,
  834. **kwargs,
  835. ) -> BatchEncoding:
  836. """
  837. Prepare a table and a string for the model.
  838. Args:
  839. table (`pd.DataFrame`):
  840. Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
  841. dataframe to convert it to string.
  842. query (`str` or `List[str]`):
  843. Question related to a table to be encoded.
  844. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
  845. Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
  846. list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
  847. (not the column header row) has index 0. The first column has index 0.
  848. answer_text (`List[str]` or `List[List[str]]`, *optional*):
  849. Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
  850. more strings. Each string must be the answer text of a corresponding answer coordinate.
  851. """
  852. if return_token_type_ids is not None and not add_special_tokens:
  853. raise ValueError(
  854. "Asking to return token_type_ids while setting add_special_tokens to False "
  855. "results in an undefined behavior. Please set add_special_tokens to True or "
  856. "set return_token_type_ids to None."
  857. )
  858. if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text):
  859. raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided")
  860. if "is_split_into_words" in kwargs:
  861. raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.")
  862. if return_offsets_mapping:
  863. raise NotImplementedError(
  864. "return_offset_mapping is not available when using Python tokenizers. "
  865. "To use this feature, change your tokenizer to one deriving from "
  866. "transformers.PreTrainedTokenizerFast."
  867. )
  868. return self._encode_plus(
  869. table=table,
  870. query=query,
  871. answer_coordinates=answer_coordinates,
  872. answer_text=answer_text,
  873. add_special_tokens=add_special_tokens,
  874. truncation=truncation,
  875. padding=padding,
  876. max_length=max_length,
  877. pad_to_multiple_of=pad_to_multiple_of,
  878. padding_side=padding_side,
  879. return_tensors=return_tensors,
  880. return_token_type_ids=return_token_type_ids,
  881. return_attention_mask=return_attention_mask,
  882. return_special_tokens_mask=return_special_tokens_mask,
  883. return_offsets_mapping=return_offsets_mapping,
  884. return_length=return_length,
  885. verbose=verbose,
  886. **kwargs,
  887. )
  888. def _encode_plus(
  889. self,
  890. table: "pd.DataFrame",
  891. query: Union[
  892. TextInput,
  893. PreTokenizedInput,
  894. EncodedInput,
  895. ],
  896. answer_coordinates: Optional[List[Tuple]] = None,
  897. answer_text: Optional[List[TextInput]] = None,
  898. add_special_tokens: bool = True,
  899. padding: Union[bool, str, PaddingStrategy] = False,
  900. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  901. max_length: Optional[int] = None,
  902. pad_to_multiple_of: Optional[int] = None,
  903. padding_side: Optional[bool] = None,
  904. return_tensors: Optional[Union[str, TensorType]] = None,
  905. return_token_type_ids: Optional[bool] = True,
  906. return_attention_mask: Optional[bool] = True,
  907. return_special_tokens_mask: bool = False,
  908. return_offsets_mapping: bool = False,
  909. return_length: bool = False,
  910. verbose: bool = True,
  911. **kwargs,
  912. ):
  913. if query is None:
  914. query = ""
  915. logger.warning(
  916. "TAPAS is a question answering model but you have not passed a query. Please be aware that the "
  917. "model will probably not behave correctly."
  918. )
  919. table_tokens = self._tokenize_table(table)
  920. query, query_tokens = self._get_question_tokens(query)
  921. return self.prepare_for_model(
  922. table,
  923. query,
  924. tokenized_table=table_tokens,
  925. query_tokens=query_tokens,
  926. answer_coordinates=answer_coordinates,
  927. answer_text=answer_text,
  928. add_special_tokens=add_special_tokens,
  929. truncation=truncation,
  930. padding=padding,
  931. max_length=max_length,
  932. pad_to_multiple_of=pad_to_multiple_of,
  933. padding_side=padding_side,
  934. return_tensors=return_tensors,
  935. prepend_batch_axis=True,
  936. return_attention_mask=return_attention_mask,
  937. return_token_type_ids=return_token_type_ids,
  938. return_special_tokens_mask=return_special_tokens_mask,
  939. return_length=return_length,
  940. verbose=verbose,
  941. )
  942. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  943. def prepare_for_model(
  944. self,
  945. raw_table: "pd.DataFrame",
  946. raw_query: Union[
  947. TextInput,
  948. PreTokenizedInput,
  949. EncodedInput,
  950. ],
  951. tokenized_table: Optional[TokenizedTable] = None,
  952. query_tokens: Optional[TokenizedTable] = None,
  953. answer_coordinates: Optional[List[Tuple]] = None,
  954. answer_text: Optional[List[TextInput]] = None,
  955. add_special_tokens: bool = True,
  956. padding: Union[bool, str, PaddingStrategy] = False,
  957. truncation: Union[bool, str, TapasTruncationStrategy] = False,
  958. max_length: Optional[int] = None,
  959. pad_to_multiple_of: Optional[int] = None,
  960. padding_side: Optional[bool] = None,
  961. return_tensors: Optional[Union[str, TensorType]] = None,
  962. return_token_type_ids: Optional[bool] = True,
  963. return_attention_mask: Optional[bool] = True,
  964. return_special_tokens_mask: bool = False,
  965. return_offsets_mapping: bool = False,
  966. return_length: bool = False,
  967. verbose: bool = True,
  968. prepend_batch_axis: bool = False,
  969. **kwargs,
  970. ) -> BatchEncoding:
  971. """
  972. Prepares a sequence of input id so that it can be used by the model. It adds special tokens, truncates
  973. sequences if overflowing while taking into account the special tokens.
  974. Args:
  975. raw_table (`pd.DataFrame`):
  976. The original table before any transformation (like tokenization) was applied to it.
  977. raw_query (`TextInput` or `PreTokenizedInput` or `EncodedInput`):
  978. The original query before any transformation (like tokenization) was applied to it.
  979. tokenized_table (`TokenizedTable`):
  980. The table after tokenization.
  981. query_tokens (`List[str]`):
  982. The query after tokenization.
  983. answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
  984. Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
  985. list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
  986. (not the column header row) has index 0. The first column has index 0.
  987. answer_text (`List[str]` or `List[List[str]]`, *optional*):
  988. Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
  989. more strings. Each string must be the answer text of a corresponding answer coordinate.
  990. """
  991. if isinstance(padding, bool):
  992. if padding and (max_length is not None or pad_to_multiple_of is not None):
  993. padding = PaddingStrategy.MAX_LENGTH
  994. else:
  995. padding = PaddingStrategy.DO_NOT_PAD
  996. elif not isinstance(padding, PaddingStrategy):
  997. padding = PaddingStrategy(padding)
  998. if isinstance(truncation, bool):
  999. if truncation:
  1000. truncation = TapasTruncationStrategy.DROP_ROWS_TO_FIT
  1001. else:
  1002. truncation = TapasTruncationStrategy.DO_NOT_TRUNCATE
  1003. elif not isinstance(truncation, TapasTruncationStrategy):
  1004. truncation = TapasTruncationStrategy(truncation)
  1005. encoded_inputs = {}
  1006. is_part_of_batch = False
  1007. prev_answer_coordinates, prev_answer_text = None, None
  1008. if "prev_answer_coordinates" in kwargs and "prev_answer_text" in kwargs:
  1009. is_part_of_batch = True
  1010. prev_answer_coordinates = kwargs["prev_answer_coordinates"]
  1011. prev_answer_text = kwargs["prev_answer_text"]
  1012. num_rows = self._get_num_rows(raw_table, truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE)
  1013. num_columns = self._get_num_columns(raw_table)
  1014. _, _, num_tokens = self._get_table_boundaries(tokenized_table)
  1015. if truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE:
  1016. num_rows, num_tokens = self._get_truncated_table_rows(
  1017. query_tokens, tokenized_table, num_rows, num_columns, max_length, truncation_strategy=truncation
  1018. )
  1019. table_data = list(self._get_table_values(tokenized_table, num_columns, num_rows, num_tokens))
  1020. query_ids = self.convert_tokens_to_ids(query_tokens)
  1021. table_ids = list(zip(*table_data))[0] if len(table_data) > 0 else list(zip(*table_data))
  1022. table_ids = self.convert_tokens_to_ids(list(table_ids))
  1023. if "return_overflowing_tokens" in kwargs and kwargs["return_overflowing_tokens"]:
  1024. raise ValueError("TAPAS does not return overflowing tokens as it works on tables.")
  1025. if add_special_tokens:
  1026. input_ids = self.build_inputs_with_special_tokens(query_ids, table_ids)
  1027. else:
  1028. input_ids = query_ids + table_ids
  1029. if max_length is not None and len(input_ids) > max_length:
  1030. raise ValueError(
  1031. "Could not encode the query and table header given the maximum length. Encoding the query and table "
  1032. f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}"
  1033. )
  1034. encoded_inputs["input_ids"] = input_ids
  1035. segment_ids = self.create_segment_token_type_ids_from_sequences(query_ids, table_data)
  1036. column_ids = self.create_column_token_type_ids_from_sequences(query_ids, table_data)
  1037. row_ids = self.create_row_token_type_ids_from_sequences(query_ids, table_data)
  1038. if not is_part_of_batch or (prev_answer_coordinates is None and prev_answer_text is None):
  1039. # simply set the prev_labels to zeros
  1040. prev_labels = [0] * len(row_ids)
  1041. else:
  1042. prev_labels = self.get_answer_ids(
  1043. column_ids, row_ids, table_data, prev_answer_text, prev_answer_coordinates
  1044. )
  1045. # FIRST: parse both the table and question in terms of numeric values
  1046. raw_table = add_numeric_table_values(raw_table)
  1047. raw_query = add_numeric_values_to_question(raw_query)
  1048. # SECOND: add numeric-related features (and not parse them in these functions):
  1049. column_ranks, inv_column_ranks = self._get_numeric_column_ranks(column_ids, row_ids, raw_table)
  1050. numeric_relations = self._get_numeric_relations(raw_query, column_ids, row_ids, raw_table)
  1051. # Load from model defaults
  1052. if return_token_type_ids is None:
  1053. return_token_type_ids = "token_type_ids" in self.model_input_names
  1054. if return_attention_mask is None:
  1055. return_attention_mask = "attention_mask" in self.model_input_names
  1056. if return_attention_mask:
  1057. attention_mask = self.create_attention_mask_from_sequences(query_ids, table_data)
  1058. encoded_inputs["attention_mask"] = attention_mask
  1059. if answer_coordinates is not None and answer_text is not None:
  1060. labels = self.get_answer_ids(column_ids, row_ids, table_data, answer_text, answer_coordinates)
  1061. numeric_values = self._get_numeric_values(raw_table, column_ids, row_ids)
  1062. numeric_values_scale = self._get_numeric_values_scale(raw_table, column_ids, row_ids)
  1063. encoded_inputs["labels"] = labels
  1064. encoded_inputs["numeric_values"] = numeric_values
  1065. encoded_inputs["numeric_values_scale"] = numeric_values_scale
  1066. if return_token_type_ids:
  1067. token_type_ids = [
  1068. segment_ids,
  1069. column_ids,
  1070. row_ids,
  1071. prev_labels,
  1072. column_ranks,
  1073. inv_column_ranks,
  1074. numeric_relations,
  1075. ]
  1076. token_type_ids = [list(ids) for ids in list(zip(*token_type_ids))]
  1077. encoded_inputs["token_type_ids"] = token_type_ids
  1078. if return_special_tokens_mask:
  1079. if add_special_tokens:
  1080. encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(query_ids, table_ids)
  1081. else:
  1082. encoded_inputs["special_tokens_mask"] = [0] * len(input_ids)
  1083. # Check lengths
  1084. if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose:
  1085. if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
  1086. logger.warning(
  1087. "Token indices sequence length is longer than the specified maximum sequence length "
  1088. f"for this model ({len(encoded_inputs['input_ids'])} > {self.model_max_length}). Running this "
  1089. "sequence through the model will result in indexing errors."
  1090. )
  1091. self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
  1092. # Padding
  1093. if padding != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
  1094. encoded_inputs = self.pad(
  1095. encoded_inputs,
  1096. max_length=max_length,
  1097. padding=padding.value,
  1098. pad_to_multiple_of=pad_to_multiple_of,
  1099. padding_side=padding_side,
  1100. return_attention_mask=return_attention_mask,
  1101. )
  1102. if return_length:
  1103. encoded_inputs["length"] = len(encoded_inputs["input_ids"])
  1104. batch_outputs = BatchEncoding(
  1105. encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
  1106. )
  1107. return batch_outputs
  1108. def _get_truncated_table_rows(
  1109. self,
  1110. query_tokens: List[str],
  1111. tokenized_table: TokenizedTable,
  1112. num_rows: int,
  1113. num_columns: int,
  1114. max_length: int,
  1115. truncation_strategy: Union[str, TapasTruncationStrategy],
  1116. ) -> Tuple[int, int]:
  1117. """
  1118. Truncates a sequence pair in-place following the strategy.
  1119. Args:
  1120. query_tokens (`List[str]`):
  1121. List of strings corresponding to the tokenized query.
  1122. tokenized_table (`TokenizedTable`):
  1123. Tokenized table
  1124. num_rows (`int`):
  1125. Total number of table rows
  1126. num_columns (`int`):
  1127. Total number of table columns
  1128. max_length (`int`):
  1129. Total maximum length.
1130. truncation_strategy (`str` or [`TapasTruncationStrategy`]):
  1131. Truncation strategy to use. Seeing as this method should only be called when truncating, the only
  1132. available strategy is the `"drop_rows_to_fit"` strategy.
  1133. Returns:
1134. `Tuple[int, int]`: tuple containing the number of rows after truncation, and the number of tokens available
  1135. for each table element.
  1136. """
  1137. if not isinstance(truncation_strategy, TapasTruncationStrategy):
  1138. truncation_strategy = TapasTruncationStrategy(truncation_strategy)
  1139. if max_length is None:
  1140. max_length = self.model_max_length
  1141. if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT:
  1142. while True:
  1143. num_tokens = self._get_max_num_tokens(
  1144. query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length
  1145. )
  1146. if num_tokens is not None:
  1147. # We could fit the table.
  1148. break
  1149. # Try to drop a row to fit the table.
  1150. num_rows -= 1
  1151. if num_rows < 1:
  1152. break
  1153. elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE:
  1154. raise ValueError(f"Unknown truncation strategy {truncation_strategy}.")
  1155. return num_rows, num_tokens or 1
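# Hedged sketch of the "drop_rows_to_fit" behaviour implemented above, with made-up numbers:
# given a table of 5 data rows and a budget that only fits 3 of them, the loop keeps
# decrementing num_rows until _get_max_num_tokens succeeds, e.g.
# >>> num_rows, num_tokens = self._get_truncated_table_rows(
# ...     query_tokens, tokenized_table, num_rows=5, num_columns=2,
# ...     max_length=64, truncation_strategy="drop_rows_to_fit")
# >>> num_rows   # could come back as 3 for such a budget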
  1156. def _tokenize_table(
  1157. self,
  1158. table=None,
  1159. ):
  1160. """
  1161. Tokenizes column headers and cell texts of a table.
  1162. Args:
1163. table (`pd.DataFrame`):
1164. Table.
Returns:
`TokenizedTable`: TokenizedTable object.
  1165. """
  1166. tokenized_rows = []
  1167. tokenized_row = []
  1168. # tokenize column headers
  1169. for column in table:
  1170. if self.strip_column_names:
  1171. tokenized_row.append(self.tokenize(""))
  1172. else:
  1173. tokenized_row.append(self.tokenize(column))
  1174. tokenized_rows.append(tokenized_row)
  1175. # tokenize cell values
  1176. for idx, row in table.iterrows():
  1177. tokenized_row = []
  1178. for cell in row:
  1179. tokenized_row.append(self.tokenize(cell))
  1180. tokenized_rows.append(tokenized_row)
  1181. token_coordinates = []
  1182. for row_index, row in enumerate(tokenized_rows):
  1183. for column_index, cell in enumerate(row):
  1184. for token_index, _ in enumerate(cell):
  1185. token_coordinates.append(
  1186. TokenCoordinates(
  1187. row_index=row_index,
  1188. column_index=column_index,
  1189. token_index=token_index,
  1190. )
  1191. )
  1192. return TokenizedTable(
  1193. rows=tokenized_rows,
  1194. selected_tokens=token_coordinates,
  1195. )
  1196. def _question_encoding_cost(self, question_tokens):
  1197. # Two extra spots of SEP and CLS.
  1198. return len(question_tokens) + 2
  1199. def _get_token_budget(self, question_tokens, max_length=None):
  1200. """
  1201. Computes the number of tokens left for the table after tokenizing a question, taking into account the max
  1202. sequence length of the model.
  1203. Args:
1204. question_tokens (`List[str]`):
1205. List of question tokens.
Returns:
1206. `int`: the number of tokens left for the table, given the model max length.
  1207. """
  1208. return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost(
  1209. question_tokens
  1210. )
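# Worked example of the budget computed above (values are hypothetical): with
# model_max_length == 512 and a question of 11 tokens, the encoding cost is
# 11 + 2 ([CLS] and [SEP]), leaving 512 - 13 = 499 tokens for the table.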
  1211. def _get_table_values(self, table, num_columns, num_rows, num_tokens) -> Generator[TableValue, None, None]:
  1212. """Iterates over partial table and returns token, column and row indexes."""
  1213. for tc in table.selected_tokens:
  1214. # First row is header row.
  1215. if tc.row_index >= num_rows + 1:
  1216. continue
  1217. if tc.column_index >= num_columns:
  1218. continue
  1219. cell = table.rows[tc.row_index][tc.column_index]
  1220. token = cell[tc.token_index]
  1221. word_begin_index = tc.token_index
  1222. # Don't add partial words. Find the starting word piece and check if it
  1223. # fits in the token budget.
  1224. while word_begin_index >= 0 and _is_inner_wordpiece(cell[word_begin_index]):
  1225. word_begin_index -= 1
  1226. if word_begin_index >= num_tokens:
  1227. continue
  1228. yield TableValue(token, tc.column_index + 1, tc.row_index)
  1229. def _get_table_boundaries(self, table):
  1230. """Return maximal number of rows, columns and tokens."""
  1231. max_num_tokens = 0
  1232. max_num_columns = 0
  1233. max_num_rows = 0
  1234. for tc in table.selected_tokens:
  1235. max_num_columns = max(max_num_columns, tc.column_index + 1)
  1236. max_num_rows = max(max_num_rows, tc.row_index + 1)
  1237. max_num_tokens = max(max_num_tokens, tc.token_index + 1)
  1238. max_num_columns = min(self.max_column_id, max_num_columns)
  1239. max_num_rows = min(self.max_row_id, max_num_rows)
  1240. return max_num_rows, max_num_columns, max_num_tokens
  1241. def _get_table_cost(self, table, num_columns, num_rows, num_tokens):
  1242. return sum(1 for _ in self._get_table_values(table, num_columns, num_rows, num_tokens))
  1243. def _get_max_num_tokens(self, question_tokens, tokenized_table, num_columns, num_rows, max_length):
  1244. """Computes max number of tokens that can be squeezed into the budget."""
  1245. token_budget = self._get_token_budget(question_tokens, max_length)
  1246. _, _, max_num_tokens = self._get_table_boundaries(tokenized_table)
  1247. if self.cell_trim_length >= 0 and max_num_tokens > self.cell_trim_length:
  1248. max_num_tokens = self.cell_trim_length
  1249. num_tokens = 0
  1250. for num_tokens in range(max_num_tokens + 1):
  1251. cost = self._get_table_cost(tokenized_table, num_columns, num_rows, num_tokens + 1)
  1252. if cost > token_budget:
  1253. break
  1254. if num_tokens < max_num_tokens:
  1255. if self.cell_trim_length >= 0:
  1256. # We don't allow dynamic trimming if a cell_trim_length is set.
  1257. return None
  1258. if num_tokens == 0:
  1259. return None
  1260. return num_tokens
  1261. def _get_num_columns(self, table):
  1262. num_columns = table.shape[1]
  1263. if num_columns >= self.max_column_id:
  1264. raise ValueError("Too many columns")
  1265. return num_columns
  1266. def _get_num_rows(self, table, drop_rows_to_fit):
  1267. num_rows = table.shape[0]
  1268. if num_rows >= self.max_row_id:
  1269. if drop_rows_to_fit:
  1270. num_rows = self.max_row_id - 1
  1271. else:
  1272. raise ValueError("Too many rows")
  1273. return num_rows
  1274. def _serialize_text(self, question_tokens):
  1275. """Serializes texts in index arrays."""
  1276. tokens = []
  1277. segment_ids = []
  1278. column_ids = []
  1279. row_ids = []
  1280. # add [CLS] token at the beginning
  1281. tokens.append(self.cls_token)
  1282. segment_ids.append(0)
  1283. column_ids.append(0)
  1284. row_ids.append(0)
  1285. for token in question_tokens:
  1286. tokens.append(token)
  1287. segment_ids.append(0)
  1288. column_ids.append(0)
  1289. row_ids.append(0)
  1290. return tokens, segment_ids, column_ids, row_ids
  1291. def _serialize(
  1292. self,
  1293. question_tokens,
  1294. table,
  1295. num_columns,
  1296. num_rows,
  1297. num_tokens,
  1298. ):
  1299. """Serializes table and text."""
  1300. tokens, segment_ids, column_ids, row_ids = self._serialize_text(question_tokens)
  1301. # add [SEP] token between question and table tokens
  1302. tokens.append(self.sep_token)
  1303. segment_ids.append(0)
  1304. column_ids.append(0)
  1305. row_ids.append(0)
  1306. for token, column_id, row_id in self._get_table_values(table, num_columns, num_rows, num_tokens):
  1307. tokens.append(token)
  1308. segment_ids.append(1)
  1309. column_ids.append(column_id)
  1310. row_ids.append(row_id)
  1311. return SerializedExample(
  1312. tokens=tokens,
  1313. segment_ids=segment_ids,
  1314. column_ids=column_ids,
  1315. row_ids=row_ids,
  1316. )
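# Illustrative layout produced by _serialize for a two-token question and a one-column
# table with a single data row (one word piece per cell; header tokens carry row id 0):
# tokens:      [CLS]  tok_q1  tok_q2  [SEP]  tok_header  tok_cell
# segment_ids:   0      0       0       0        1           1
# column_ids:    0      0       0       0        1           1
# row_ids:       0      0       0       0        0           1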
  1317. def _get_column_values(self, table, col_index):
  1318. table_numeric_values = {}
  1319. for row_index, row in table.iterrows():
  1320. cell = row[col_index]
  1321. if cell.numeric_value is not None:
  1322. table_numeric_values[row_index] = cell.numeric_value
  1323. return table_numeric_values
  1324. def _get_cell_token_indexes(self, column_ids, row_ids, column_id, row_id):
  1325. for index in range(len(column_ids)):
  1326. if column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id:
  1327. yield index
  1328. def _get_numeric_column_ranks(self, column_ids, row_ids, table):
  1329. """Returns column ranks for all numeric columns."""
  1330. ranks = [0] * len(column_ids)
  1331. inv_ranks = [0] * len(column_ids)
  1332. # original code from tf_example_utils.py of the original implementation
  1333. if table is not None:
  1334. for col_index in range(len(table.columns)):
  1335. table_numeric_values = self._get_column_values(table, col_index)
  1336. if not table_numeric_values:
  1337. continue
  1338. try:
  1339. key_fn = get_numeric_sort_key_fn(table_numeric_values.values())
  1340. except ValueError:
  1341. continue
  1342. table_numeric_values = {row_index: key_fn(value) for row_index, value in table_numeric_values.items()}
  1343. table_numeric_values_inv = collections.defaultdict(list)
  1344. for row_index, value in table_numeric_values.items():
  1345. table_numeric_values_inv[value].append(row_index)
  1346. unique_values = sorted(table_numeric_values_inv.keys())
  1347. for rank, value in enumerate(unique_values):
  1348. for row_index in table_numeric_values_inv[value]:
  1349. for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index):
  1350. ranks[index] = rank + 1
  1351. inv_ranks[index] = len(unique_values) - rank
  1352. return ranks, inv_ranks
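# Worked example of the ranks computed above: a numeric column holding 10, 30 and 20 in
# data rows 0, 1 and 2 yields rank 1/3/2 and inverse rank 3/1/2 respectively, and every
# word piece belonging to a cell receives that cell's (rank, inverse rank) pair.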
  1353. def _get_numeric_sort_key_fn(self, table_numeric_values, value):
  1354. """
  1355. Returns the sort key function for comparing value to table values. The function returned will be a suitable
  1356. input for the key param of the sort(). See number_annotation_utils._get_numeric_sort_key_fn for details
  1357. Args:
  1358. table_numeric_values: Numeric values of a column
  1359. value: Numeric value in the question
  1360. Returns:
1361. A key function for comparing column and question values.
  1362. """
  1363. if not table_numeric_values:
  1364. return None
  1365. all_values = list(table_numeric_values.values())
  1366. all_values.append(value)
  1367. try:
  1368. return get_numeric_sort_key_fn(all_values)
  1369. except ValueError:
  1370. return None
  1371. def _get_numeric_relations(self, question, column_ids, row_ids, table):
  1372. """
  1373. Returns numeric relations embeddings
  1374. Args:
  1375. question: Question object.
  1376. column_ids: Maps word piece position to column id.
  1377. row_ids: Maps word piece position to row id.
  1378. table: The table containing the numeric cell values.
  1379. """
  1380. numeric_relations = [0] * len(column_ids)
  1381. # first, we add any numeric value spans to the question:
  1382. # Create a dictionary that maps a table cell to the set of all relations
  1383. # this cell has with any value in the question.
  1384. cell_indices_to_relations = collections.defaultdict(set)
  1385. if question is not None and table is not None:
  1386. for numeric_value_span in question.numeric_spans:
  1387. for value in numeric_value_span.values:
  1388. for column_index in range(len(table.columns)):
  1389. table_numeric_values = self._get_column_values(table, column_index)
  1390. sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values, value)
  1391. if sort_key_fn is None:
  1392. continue
  1393. for row_index, cell_value in table_numeric_values.items():
  1394. relation = get_numeric_relation(value, cell_value, sort_key_fn)
  1395. if relation is not None:
  1396. cell_indices_to_relations[column_index, row_index].add(relation)
  1397. # For each cell add a special feature for all its word pieces.
  1398. for (column_index, row_index), relations in cell_indices_to_relations.items():
  1399. relation_set_index = 0
  1400. for relation in relations:
  1401. assert relation.value >= Relation.EQ.value
  1402. relation_set_index += 2 ** (relation.value - Relation.EQ.value)
  1403. for cell_token_index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index):
  1404. numeric_relations[cell_token_index] = relation_set_index
  1405. return numeric_relations
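# Worked example of the bitmask built above: relations are encoded relative to Relation.EQ,
# so a cell that is EQ to one question value (bit 0) and GT another (bit 2) gets
# relation_set_index = 2**0 + 2**2 = 5 on all of its word pieces.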
  1406. def _get_numeric_values(self, table, column_ids, row_ids):
  1407. """Returns numeric values for computation of answer loss."""
  1408. numeric_values = [float("nan")] * len(column_ids)
  1409. if table is not None:
  1410. num_rows = table.shape[0]
  1411. num_columns = table.shape[1]
  1412. for col_index in range(num_columns):
  1413. for row_index in range(num_rows):
  1414. numeric_value = table.iloc[row_index, col_index].numeric_value
  1415. if numeric_value is not None:
  1416. if numeric_value.float_value is None:
  1417. continue
  1418. float_value = numeric_value.float_value
  1419. if float_value == float("inf"):
  1420. continue
  1421. for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index):
  1422. numeric_values[index] = float_value
  1423. return numeric_values
  1424. def _get_numeric_values_scale(self, table, column_ids, row_ids):
  1425. """Returns a scale to each token to down weigh the value of long words."""
  1426. numeric_values_scale = [1.0] * len(column_ids)
  1427. if table is None:
  1428. return numeric_values_scale
  1429. num_rows = table.shape[0]
  1430. num_columns = table.shape[1]
  1431. for col_index in range(num_columns):
  1432. for row_index in range(num_rows):
  1433. indices = list(self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index))
  1434. num_indices = len(indices)
  1435. if num_indices > 1:
  1436. for index in indices:
  1437. numeric_values_scale[index] = float(num_indices)
  1438. return numeric_values_scale
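# Illustrative effect of the scaling above: if a cell's text tokenizes into 3 word pieces,
# each of those positions gets numeric_values_scale == 3.0, so that a downstream
# aggregation over tokens does not over-count the numeric value of long cells.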
  1439. def _pad_to_seq_length(self, inputs):
  1440. while len(inputs) > self.model_max_length:
  1441. inputs.pop()
  1442. while len(inputs) < self.model_max_length:
  1443. inputs.append(0)
  1444. def _get_all_answer_ids_from_coordinates(
  1445. self,
  1446. column_ids,
  1447. row_ids,
  1448. answers_list,
  1449. ):
  1450. """Maps lists of answer coordinates to token indexes."""
  1451. answer_ids = [0] * len(column_ids)
  1452. found_answers = set()
  1453. all_answers = set()
  1454. for answers in answers_list:
  1455. column_index, row_index = answers
  1456. all_answers.add((column_index, row_index))
  1457. for index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index):
  1458. found_answers.add((column_index, row_index))
  1459. answer_ids[index] = 1
  1460. missing_count = len(all_answers) - len(found_answers)
  1461. return answer_ids, missing_count
  1462. def _get_all_answer_ids(self, column_ids, row_ids, answer_coordinates):
  1463. """
  1464. Maps answer coordinates of a question to token indexes.
  1465. In the SQA format (TSV), the coordinates are given as (row, column) tuples. Here, we first swap them to
  1466. (column, row) format before calling _get_all_answer_ids_from_coordinates.
  1467. """
  1468. def _to_coordinates(answer_coordinates_question):
  1469. return [(coords[1], coords[0]) for coords in answer_coordinates_question]
  1470. return self._get_all_answer_ids_from_coordinates(
  1471. column_ids, row_ids, answers_list=(_to_coordinates(answer_coordinates))
  1472. )
  1473. def _find_tokens(self, text, segment):
  1474. """Return start index of segment in text or None."""
1475. logger.info(f"text: {text} {segment}")
  1476. for index in range(1 + len(text) - len(segment)):
  1477. for seg_index, seg_token in enumerate(segment):
  1478. if text[index + seg_index].piece != seg_token.piece:
  1479. break
  1480. else:
  1481. return index
  1482. return None
  1483. def _find_answer_coordinates_from_answer_text(
  1484. self,
  1485. tokenized_table,
  1486. answer_text,
  1487. ):
  1488. """Returns all occurrences of answer_text in the table."""
1489. logger.info(f"answer text: {answer_text}")
  1490. for row_index, row in enumerate(tokenized_table.rows):
  1491. if row_index == 0:
  1492. # We don't search for answers in the header.
  1493. continue
  1494. for col_index, cell in enumerate(row):
  1495. token_index = self._find_tokens(cell, answer_text)
  1496. if token_index is not None:
  1497. yield TokenCoordinates(
  1498. row_index=row_index,
  1499. column_index=col_index,
  1500. token_index=token_index,
  1501. )
  1502. def _find_answer_ids_from_answer_texts(
  1503. self,
  1504. column_ids,
  1505. row_ids,
  1506. tokenized_table,
  1507. answer_texts,
  1508. ):
  1509. """Maps question with answer texts to the first matching token indexes."""
  1510. answer_ids = [0] * len(column_ids)
  1511. for answer_text in answer_texts:
  1512. for coordinates in self._find_answer_coordinates_from_answer_text(
  1513. tokenized_table,
  1514. answer_text,
  1515. ):
1516. # Maps answer coordinates to indexes; this can fail if tokens / rows have
  1517. # been pruned.
  1518. indexes = list(
  1519. self._get_cell_token_indexes(
  1520. column_ids,
  1521. row_ids,
  1522. column_id=coordinates.column_index,
  1523. row_id=coordinates.row_index - 1,
  1524. )
  1525. )
  1526. indexes.sort()
  1527. coordinate_answer_ids = []
  1528. if indexes:
  1529. begin_index = coordinates.token_index + indexes[0]
  1530. end_index = begin_index + len(answer_text)
  1531. for index in indexes:
  1532. if index >= begin_index and index < end_index:
  1533. coordinate_answer_ids.append(index)
  1534. if len(coordinate_answer_ids) == len(answer_text):
  1535. for index in coordinate_answer_ids:
  1536. answer_ids[index] = 1
  1537. break
  1538. return answer_ids
  1539. def _get_answer_ids(self, column_ids, row_ids, answer_coordinates):
  1540. """Maps answer coordinates of a question to token indexes."""
  1541. answer_ids, missing_count = self._get_all_answer_ids(column_ids, row_ids, answer_coordinates)
  1542. if missing_count:
  1543. raise ValueError("Couldn't find all answers")
  1544. return answer_ids
  1545. def get_answer_ids(self, column_ids, row_ids, tokenized_table, answer_texts_question, answer_coordinates_question):
  1546. if self.update_answer_coordinates:
  1547. return self._find_answer_ids_from_answer_texts(
  1548. column_ids,
  1549. row_ids,
  1550. tokenized_table,
  1551. answer_texts=[self.tokenize(at) for at in answer_texts_question],
  1552. )
  1553. return self._get_answer_ids(column_ids, row_ids, answer_coordinates_question)
  1554. def _pad(
  1555. self,
  1556. encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
  1557. max_length: Optional[int] = None,
  1558. padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
  1559. pad_to_multiple_of: Optional[int] = None,
1560. padding_side: Optional[str] = None,
  1561. return_attention_mask: Optional[bool] = None,
  1562. ) -> dict:
  1563. """
  1564. Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
  1565. Args:
  1566. encoded_inputs:
  1567. Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
  1568. max_length: maximum length of the returned list and optionally padding length (see below).
  1569. Will truncate by taking into account the special tokens.
  1570. padding_strategy: PaddingStrategy to use for padding.
  1571. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
  1572. - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
  1573. - PaddingStrategy.DO_NOT_PAD: Do not pad
  1574. The tokenizer padding sides are defined in self.padding_side:
  1575. - 'left': pads on the left of the sequences
  1576. - 'right': pads on the right of the sequences
  1577. pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
  1578. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
  1579. `>= 7.5` (Volta).
  1580. padding_side:
1581. The side on which the model should have padding applied. Should be either 'right' or 'left'.
  1582. Default value is picked from the class attribute of the same name.
  1583. return_attention_mask:
  1584. (optional) Set to False to avoid returning attention mask (default: set to model specifics)
  1585. """
  1586. # Load from model defaults
  1587. if return_attention_mask is None:
  1588. return_attention_mask = "attention_mask" in self.model_input_names
  1589. if padding_strategy == PaddingStrategy.LONGEST:
  1590. max_length = len(encoded_inputs["input_ids"])
  1591. if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
  1592. max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
  1593. needs_to_be_padded = (
  1594. padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length
  1595. )
  1596. # Initialize attention mask if not present.
  1597. if return_attention_mask and "attention_mask" not in encoded_inputs:
  1598. encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
  1599. if needs_to_be_padded:
  1600. difference = max_length - len(encoded_inputs["input_ids"])
  1601. padding_side = padding_side if padding_side is not None else self.padding_side
  1602. if padding_side == "right":
  1603. if return_attention_mask:
  1604. encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
  1605. if "token_type_ids" in encoded_inputs:
  1606. encoded_inputs["token_type_ids"] = (
  1607. encoded_inputs["token_type_ids"] + [[self.pad_token_type_id] * 7] * difference
  1608. )
  1609. if "labels" in encoded_inputs:
  1610. encoded_inputs["labels"] = encoded_inputs["labels"] + [0] * difference
  1611. if "numeric_values" in encoded_inputs:
  1612. encoded_inputs["numeric_values"] = encoded_inputs["numeric_values"] + [float("nan")] * difference
  1613. if "numeric_values_scale" in encoded_inputs:
  1614. encoded_inputs["numeric_values_scale"] = (
  1615. encoded_inputs["numeric_values_scale"] + [1.0] * difference
  1616. )
  1617. if "special_tokens_mask" in encoded_inputs:
  1618. encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
  1619. encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
  1620. elif padding_side == "left":
  1621. if return_attention_mask:
  1622. encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
  1623. if "token_type_ids" in encoded_inputs:
  1624. encoded_inputs["token_type_ids"] = [[self.pad_token_type_id] * 7] * difference + encoded_inputs[
  1625. "token_type_ids"
  1626. ]
  1627. if "labels" in encoded_inputs:
  1628. encoded_inputs["labels"] = [0] * difference + encoded_inputs["labels"]
  1629. if "numeric_values" in encoded_inputs:
  1630. encoded_inputs["numeric_values"] = [float("nan")] * difference + encoded_inputs["numeric_values"]
  1631. if "numeric_values_scale" in encoded_inputs:
  1632. encoded_inputs["numeric_values_scale"] = [1.0] * difference + encoded_inputs[
  1633. "numeric_values_scale"
  1634. ]
  1635. if "special_tokens_mask" in encoded_inputs:
  1636. encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
  1637. encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
  1638. else:
1639. raise ValueError("Invalid padding side: " + str(padding_side))
  1640. return encoded_inputs
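# Hedged example of right padding as implemented above, assuming pad_token_id == 0,
# pad_token_type_id == 0 and a target max_length of 6 for a length-3 encoding:
# input_ids      [7, 8, 9]       -> [7, 8, 9, 0, 0, 0]
# attention_mask [1, 1, 1]       -> [1, 1, 1, 0, 0, 0]
# token_type_ids three 7-tuples  -> extended with three [0, 0, 0, 0, 0, 0, 0] rows
# numeric_values (when present)  -> extended with three more float("nan") entries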
  1641. # Everything related to converting logits to predictions
  1642. def _get_cell_token_probs(self, probabilities, segment_ids, row_ids, column_ids):
  1643. for i, p in enumerate(probabilities):
  1644. segment_id = segment_ids[i]
  1645. col = column_ids[i] - 1
  1646. row = row_ids[i] - 1
  1647. if col >= 0 and row >= 0 and segment_id == 1:
  1648. yield i, p
  1649. def _get_mean_cell_probs(self, probabilities, segment_ids, row_ids, column_ids):
  1650. """Computes average probability per cell, aggregating over tokens."""
  1651. coords_to_probs = collections.defaultdict(list)
  1652. for i, prob in self._get_cell_token_probs(probabilities, segment_ids, row_ids, column_ids):
  1653. col = column_ids[i] - 1
  1654. row = row_ids[i] - 1
  1655. coords_to_probs[(col, row)].append(prob)
  1656. return {coords: np.array(cell_probs).mean() for coords, cell_probs in coords_to_probs.items()}
  1657. def convert_logits_to_predictions(self, data, logits, logits_agg=None, cell_classification_threshold=0.5):
  1658. """
  1659. Converts logits of [`TapasForQuestionAnswering`] to actual predicted answer coordinates and optional
  1660. aggregation indices.
  1661. The original implementation, on which this function is based, can be found
  1662. [here](https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288).
  1663. Args:
  1664. data (`dict`):
  1665. Dictionary mapping features to actual values. Should be created using [`TapasTokenizer`].
  1666. logits (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
  1667. Tensor containing the logits at the token level.
  1668. logits_agg (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, num_aggregation_labels)`, *optional*):
  1669. Tensor containing the aggregation logits.
  1670. cell_classification_threshold (`float`, *optional*, defaults to 0.5):
  1671. Threshold to be used for cell selection. All table cells for which their probability is larger than
  1672. this threshold will be selected.
  1673. Returns:
  1674. `tuple` comprising various elements depending on the inputs:
  1675. - predicted_answer_coordinates (`List[List[[tuple]]` of length `batch_size`): Predicted answer coordinates
  1676. as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a
  1677. single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index).
  1678. - predicted_aggregation_indices (`List[int]`of length `batch_size`, *optional*, returned when
  1679. `logits_aggregation` is provided): Predicted aggregation operator indices of the aggregation head.
  1680. """
  1681. # converting to numpy arrays to work with PT/TF
  1682. logits = logits.numpy()
  1683. if logits_agg is not None:
  1684. logits_agg = logits_agg.numpy()
  1685. data = {key: value.numpy() for key, value in data.items() if key != "training"}
  1686. # input data is of type float32
  1687. # np.log(np.finfo(np.float32).max) = 88.72284
  1688. # Any value over 88.72284 will overflow when passed through the exponential, sending a warning
  1689. # We disable this warning by truncating the logits.
  1690. logits[logits < -88.7] = -88.7
  1691. # Compute probabilities from token logits
  1692. probabilities = 1 / (1 + np.exp(-logits)) * data["attention_mask"]
  1693. token_types = [
  1694. "segment_ids",
  1695. "column_ids",
  1696. "row_ids",
  1697. "prev_labels",
  1698. "column_ranks",
  1699. "inv_column_ranks",
  1700. "numeric_relations",
  1701. ]
  1702. # collect input_ids, segment ids, row ids and column ids of batch. Shape (batch_size, seq_len)
  1703. input_ids = data["input_ids"]
  1704. segment_ids = data["token_type_ids"][:, :, token_types.index("segment_ids")]
  1705. row_ids = data["token_type_ids"][:, :, token_types.index("row_ids")]
  1706. column_ids = data["token_type_ids"][:, :, token_types.index("column_ids")]
  1707. # next, get answer coordinates for every example in the batch
  1708. num_batch = input_ids.shape[0]
  1709. predicted_answer_coordinates = []
  1710. for i in range(num_batch):
  1711. probabilities_example = probabilities[i].tolist()
  1712. segment_ids_example = segment_ids[i]
  1713. row_ids_example = row_ids[i]
  1714. column_ids_example = column_ids[i]
  1715. max_width = column_ids_example.max()
  1716. max_height = row_ids_example.max()
  1717. if max_width == 0 and max_height == 0:
  1718. continue
  1719. cell_coords_to_prob = self._get_mean_cell_probs(
  1720. probabilities_example,
  1721. segment_ids_example.tolist(),
  1722. row_ids_example.tolist(),
  1723. column_ids_example.tolist(),
  1724. )
  1725. # Select the answers above the classification threshold.
  1726. answer_coordinates = []
  1727. for col in range(max_width):
  1728. for row in range(max_height):
  1729. cell_prob = cell_coords_to_prob.get((col, row), None)
  1730. if cell_prob is not None:
  1731. if cell_prob > cell_classification_threshold:
  1732. answer_coordinates.append((row, col))
  1733. answer_coordinates = sorted(answer_coordinates)
  1734. predicted_answer_coordinates.append(answer_coordinates)
  1735. output = (predicted_answer_coordinates,)
  1736. if logits_agg is not None:
  1737. predicted_aggregation_indices = logits_agg.argmax(axis=-1)
  1738. output = (predicted_answer_coordinates, predicted_aggregation_indices.tolist())
  1739. return output
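# Hedged usage sketch for the conversion above, assuming `model` is a TapasForQuestionAnswering
# instance with an aggregation head and `inputs` was produced by this tokenizer
# (with return_tensors="pt"):
# >>> outputs = model(**inputs)
# >>> predicted_coordinates, predicted_aggregation = tokenizer.convert_logits_to_predictions(
# ...     inputs, outputs.logits.detach(), outputs.logits_aggregation.detach())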
  1740. # End of everything related to converting logits to predictions
  1741. # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
  1742. class BasicTokenizer:
  1743. """
  1744. Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
  1745. Args:
  1746. do_lower_case (`bool`, *optional*, defaults to `True`):
  1747. Whether or not to lowercase the input when tokenizing.
  1748. never_split (`Iterable`, *optional*):
  1749. Collection of tokens which will never be split during tokenization. Only has an effect when
  1750. `do_basic_tokenize=True`
  1751. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
  1752. Whether or not to tokenize Chinese characters.
  1753. This should likely be deactivated for Japanese (see this
  1754. [issue](https://github.com/huggingface/transformers/issues/328)).
  1755. strip_accents (`bool`, *optional*):
  1756. Whether or not to strip all accents. If this option is not specified, then it will be determined by the
  1757. value for `lowercase` (as in the original BERT).
  1758. do_split_on_punc (`bool`, *optional*, defaults to `True`):
  1759. In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
  1760. the full context of the words, such as contractions.
  1761. """
  1762. def __init__(
  1763. self,
  1764. do_lower_case=True,
  1765. never_split=None,
  1766. tokenize_chinese_chars=True,
  1767. strip_accents=None,
  1768. do_split_on_punc=True,
  1769. ):
  1770. if never_split is None:
  1771. never_split = []
  1772. self.do_lower_case = do_lower_case
  1773. self.never_split = set(never_split)
  1774. self.tokenize_chinese_chars = tokenize_chinese_chars
  1775. self.strip_accents = strip_accents
  1776. self.do_split_on_punc = do_split_on_punc
  1777. def tokenize(self, text, never_split=None):
  1778. """
  1779. Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
  1780. Args:
  1781. never_split (`List[str]`, *optional*)
  1782. Kept for backward compatibility purposes. Now implemented directly at the base class level (see
  1783. [`PreTrainedTokenizer.tokenize`]) List of token not to split.
  1784. """
  1785. # union() returns a new set by concatenating the two sets.
  1786. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
  1787. text = self._clean_text(text)
  1788. # This was added on November 1st, 2018 for the multilingual and Chinese
  1789. # models. This is also applied to the English models now, but it doesn't
  1790. # matter since the English models were not trained on any Chinese data
  1791. # and generally don't have any Chinese data in them (there are Chinese
  1792. # characters in the vocabulary because Wikipedia does have some Chinese
1793. # words in the English Wikipedia).
  1794. if self.tokenize_chinese_chars:
  1795. text = self._tokenize_chinese_chars(text)
  1796. # prevents treating the same character with different unicode codepoints as different characters
  1797. unicode_normalized_text = unicodedata.normalize("NFC", text)
  1798. orig_tokens = whitespace_tokenize(unicode_normalized_text)
  1799. split_tokens = []
  1800. for token in orig_tokens:
  1801. if token not in never_split:
  1802. if self.do_lower_case:
  1803. token = token.lower()
  1804. if self.strip_accents is not False:
  1805. token = self._run_strip_accents(token)
  1806. elif self.strip_accents:
  1807. token = self._run_strip_accents(token)
  1808. split_tokens.extend(self._run_split_on_punc(token, never_split))
  1809. output_tokens = whitespace_tokenize(" ".join(split_tokens))
  1810. return output_tokens
  1811. def _run_strip_accents(self, text):
  1812. """Strips accents from a piece of text."""
  1813. text = unicodedata.normalize("NFD", text)
  1814. output = []
  1815. for char in text:
  1816. cat = unicodedata.category(char)
  1817. if cat == "Mn":
  1818. continue
  1819. output.append(char)
  1820. return "".join(output)
  1821. def _run_split_on_punc(self, text, never_split=None):
  1822. """Splits punctuation on a piece of text."""
  1823. if not self.do_split_on_punc or (never_split is not None and text in never_split):
  1824. return [text]
  1825. chars = list(text)
  1826. i = 0
  1827. start_new_word = True
  1828. output = []
  1829. while i < len(chars):
  1830. char = chars[i]
  1831. if _is_punctuation(char):
  1832. output.append([char])
  1833. start_new_word = True
  1834. else:
  1835. if start_new_word:
  1836. output.append([])
  1837. start_new_word = False
  1838. output[-1].append(char)
  1839. i += 1
  1840. return ["".join(x) for x in output]
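# Illustrative behaviour of the punctuation split above (with default settings):
# >>> self._run_split_on_punc("hello,world!")
# ['hello', ',', 'world', '!']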
  1841. def _tokenize_chinese_chars(self, text):
  1842. """Adds whitespace around any CJK character."""
  1843. output = []
  1844. for char in text:
  1845. cp = ord(char)
  1846. if self._is_chinese_char(cp):
  1847. output.append(" ")
  1848. output.append(char)
  1849. output.append(" ")
  1850. else:
  1851. output.append(char)
  1852. return "".join(output)
  1853. def _is_chinese_char(self, cp):
  1854. """Checks whether CP is the codepoint of a CJK character."""
  1855. # This defines a "chinese character" as anything in the CJK Unicode block:
  1856. # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
  1857. #
  1858. # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
  1859. # despite its name. The modern Korean Hangul alphabet is a different block,
  1860. # as is Japanese Hiragana and Katakana. Those alphabets are used to write
  1861. # space-separated words, so they are not treated specially and handled
1862. like all of the other languages.
  1863. if (
  1864. (cp >= 0x4E00 and cp <= 0x9FFF)
  1865. or (cp >= 0x3400 and cp <= 0x4DBF) #
  1866. or (cp >= 0x20000 and cp <= 0x2A6DF) #
  1867. or (cp >= 0x2A700 and cp <= 0x2B73F) #
  1868. or (cp >= 0x2B740 and cp <= 0x2B81F) #
  1869. or (cp >= 0x2B820 and cp <= 0x2CEAF) #
  1870. or (cp >= 0xF900 and cp <= 0xFAFF)
  1871. or (cp >= 0x2F800 and cp <= 0x2FA1F) #
  1872. ): #
  1873. return True
  1874. return False
  1875. def _clean_text(self, text):
  1876. """Performs invalid character removal and whitespace cleanup on text."""
  1877. output = []
  1878. for char in text:
  1879. cp = ord(char)
  1880. if cp == 0 or cp == 0xFFFD or _is_control(char):
  1881. continue
  1882. if _is_whitespace(char):
  1883. output.append(" ")
  1884. else:
  1885. output.append(char)
  1886. return "".join(output)
  1887. # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
  1888. class WordpieceTokenizer:
  1889. """Runs WordPiece tokenization."""
  1890. def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
  1891. self.vocab = vocab
  1892. self.unk_token = unk_token
  1893. self.max_input_chars_per_word = max_input_chars_per_word
  1894. def tokenize(self, text):
  1895. """
  1896. Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
  1897. tokenization using the given vocabulary.
1898. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
  1899. Args:
  1900. text: A single token or whitespace separated tokens. This should have
  1901. already been passed through *BasicTokenizer*.
  1902. Returns:
  1903. A list of wordpiece tokens.
  1904. """
  1905. output_tokens = []
  1906. for token in whitespace_tokenize(text):
  1907. chars = list(token)
  1908. if len(chars) > self.max_input_chars_per_word:
  1909. output_tokens.append(self.unk_token)
  1910. continue
  1911. is_bad = False
  1912. start = 0
  1913. sub_tokens = []
  1914. while start < len(chars):
  1915. end = len(chars)
  1916. cur_substr = None
  1917. while start < end:
  1918. substr = "".join(chars[start:end])
  1919. if start > 0:
  1920. substr = "##" + substr
  1921. if substr in self.vocab:
  1922. cur_substr = substr
  1923. break
  1924. end -= 1
  1925. if cur_substr is None:
  1926. is_bad = True
  1927. break
  1928. sub_tokens.append(cur_substr)
  1929. start = end
  1930. if is_bad:
  1931. output_tokens.append(self.unk_token)
  1932. else:
  1933. output_tokens.extend(sub_tokens)
  1934. return output_tokens
  1935. # Below: utilities for TAPAS tokenizer (independent from PyTorch/Tensorflow).
  1936. # This includes functions to parse numeric values (dates and numbers) from both the table and questions in order
  1937. # to create the column_ranks, inv_column_ranks, numeric_values, numeric values_scale and numeric_relations in
  1938. # prepare_for_model of TapasTokenizer.
1939. # These are meant to be used in an academic setup; for production use cases, Gold mine or Aqua should be used.
  1940. # taken from constants.py of the original implementation
  1941. # URL: https://github.com/google-research/tapas/blob/master/tapas/utils/constants.py
  1942. class Relation(enum.Enum):
  1943. HEADER_TO_CELL = 1 # Connects header to cell.
  1944. CELL_TO_HEADER = 2 # Connects cell to header.
  1945. QUERY_TO_HEADER = 3 # Connects query to headers.
  1946. QUERY_TO_CELL = 4 # Connects query to cells.
  1947. ROW_TO_CELL = 5 # Connects row to cells.
  1948. CELL_TO_ROW = 6 # Connects cells to row.
  1949. EQ = 7 # Annotation value is same as cell value
  1950. LT = 8 # Annotation value is less than cell value
  1951. GT = 9 # Annotation value is greater than cell value
  1952. @dataclass
  1953. class Date:
  1954. year: Optional[int] = None
  1955. month: Optional[int] = None
  1956. day: Optional[int] = None
  1957. @dataclass
  1958. class NumericValue:
  1959. float_value: Optional[float] = None
  1960. date: Optional[Date] = None
  1961. @dataclass
  1962. class NumericValueSpan:
  1963. begin_index: int = None
  1964. end_index: int = None
  1965. values: List[NumericValue] = None
  1966. @dataclass
  1967. class Cell:
  1968. text: str
  1969. numeric_value: Optional[NumericValue] = None
  1970. @dataclass
  1971. class Question:
  1972. original_text: str # The original raw question string.
  1973. text: str # The question string after normalization.
  1974. numeric_spans: Optional[List[NumericValueSpan]] = None
  1975. # Below: all functions from number_utils.py as well as 2 functions (namely get_all_spans and normalize_for_match)
  1976. # from text_utils.py of the original implementation. URL's:
  1977. # - https://github.com/google-research/tapas/blob/master/tapas/utils/number_utils.py
  1978. # - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py
  1979. # Constants for parsing date expressions.
  1980. # Masks that specify (by a bool) which of (year, month, day) will be populated.
  1981. _DateMask = collections.namedtuple("_DateMask", ["year", "month", "day"])
  1982. _YEAR = _DateMask(True, False, False)
  1983. _YEAR_MONTH = _DateMask(True, True, False)
  1984. _YEAR_MONTH_DAY = _DateMask(True, True, True)
  1985. _MONTH = _DateMask(False, True, False)
  1986. _MONTH_DAY = _DateMask(False, True, True)
  1987. # Pairs of patterns to pass to 'datetime.strptime' and masks specifying which
  1988. # fields will be set by the corresponding pattern.
  1989. _DATE_PATTERNS = (
  1990. ("%B", _MONTH),
  1991. ("%Y", _YEAR),
  1992. ("%Ys", _YEAR),
  1993. ("%b %Y", _YEAR_MONTH),
  1994. ("%B %Y", _YEAR_MONTH),
  1995. ("%B %d", _MONTH_DAY),
  1996. ("%b %d", _MONTH_DAY),
  1997. ("%d %b", _MONTH_DAY),
  1998. ("%d %B", _MONTH_DAY),
  1999. ("%B %d, %Y", _YEAR_MONTH_DAY),
  2000. ("%d %B %Y", _YEAR_MONTH_DAY),
  2001. ("%m-%d-%Y", _YEAR_MONTH_DAY),
  2002. ("%Y-%m-%d", _YEAR_MONTH_DAY),
  2003. ("%Y-%m", _YEAR_MONTH),
  2004. ("%B %Y", _YEAR_MONTH),
  2005. ("%d %b %Y", _YEAR_MONTH_DAY),
  2006. ("%Y-%m-%d", _YEAR_MONTH_DAY),
  2007. ("%b %d, %Y", _YEAR_MONTH_DAY),
  2008. ("%d.%m.%Y", _YEAR_MONTH_DAY),
  2009. ("%A, %b %d", _MONTH_DAY),
  2010. ("%A, %B %d", _MONTH_DAY),
  2011. )
  2012. # This mapping is used to convert date patterns to regex patterns.
  2013. _FIELD_TO_REGEX = (
  2014. ("%A", r"\w+"), # Weekday as locale’s full name.
  2015. ("%B", r"\w+"), # Month as locale’s full name.
  2016. ("%Y", r"\d{4}"), # Year with century as a decimal number.
  2017. ("%b", r"\w{3}"), # Month as locale’s abbreviated name.
  2018. ("%d", r"\d{1,2}"), # Day of the month as a zero-padded decimal number.
  2019. ("%m", r"\d{1,2}"), # Month as a zero-padded decimal number.
  2020. )
  2021. def _process_date_pattern(dp):
  2022. """Compute a regex for each date pattern to use as a prefilter."""
  2023. pattern, mask = dp
  2024. regex = pattern
  2025. regex = regex.replace(".", re.escape("."))
  2026. regex = regex.replace("-", re.escape("-"))
  2027. regex = regex.replace(" ", r"\s+")
  2028. for field, field_regex in _FIELD_TO_REGEX:
  2029. regex = regex.replace(field, field_regex)
  2030. # Make sure we didn't miss any of the fields.
  2031. assert "%" not in regex, regex
  2032. return pattern, mask, re.compile("^" + regex + "$")
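# Worked example of the prefilter construction above: the pattern "%d %B %Y" becomes the
# regex r"^\d{1,2}\s+\w+\s+\d{4}$", which matches strings like "18 February 1955" before
# the (more expensive) datetime.strptime call is attempted.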
  2033. def _process_date_patterns():
  2034. return tuple(_process_date_pattern(dp) for dp in _DATE_PATTERNS)
  2035. _PROCESSED_DATE_PATTERNS = _process_date_patterns()
  2036. _MAX_DATE_NGRAM_SIZE = 5
  2037. # Following DynSp:
  2038. # https://github.com/Microsoft/DynSP/blob/master/util.py#L414.
  2039. _NUMBER_WORDS = [
  2040. "zero",
  2041. "one",
  2042. "two",
  2043. "three",
  2044. "four",
  2045. "five",
  2046. "six",
  2047. "seven",
  2048. "eight",
  2049. "nine",
  2050. "ten",
  2051. "eleven",
  2052. "twelve",
  2053. ]
  2054. _ORDINAL_WORDS = [
  2055. "zeroth",
  2056. "first",
  2057. "second",
  2058. "third",
  2059. "fourth",
  2060. "fith",
  2061. "sixth",
  2062. "seventh",
  2063. "eighth",
  2064. "ninth",
  2065. "tenth",
  2066. "eleventh",
  2067. "twelfth",
  2068. ]
  2069. _ORDINAL_SUFFIXES = ["st", "nd", "rd", "th"]
  2070. _NUMBER_PATTERN = re.compile(r"((^|\s)[+-])?((\.\d+)|(\d+(,\d\d\d)*(\.\d*)?))")
  2071. # Following DynSp:
  2072. # https://github.com/Microsoft/DynSP/blob/master/util.py#L293.
  2073. _MIN_YEAR = 1700
  2074. _MAX_YEAR = 2016
  2075. _INF = float("INF")
  2076. def _get_numeric_value_from_date(date, mask):
  2077. """Converts date (datetime Python object) to a NumericValue object with a Date object value."""
  2078. if date.year < _MIN_YEAR or date.year > _MAX_YEAR:
  2079. raise ValueError(f"Invalid year: {date.year}")
  2080. new_date = Date()
  2081. if mask.year:
  2082. new_date.year = date.year
  2083. if mask.month:
  2084. new_date.month = date.month
  2085. if mask.day:
  2086. new_date.day = date.day
  2087. return NumericValue(date=new_date)
  2088. def _get_span_length_key(span):
  2089. """Sorts span by decreasing length first and increasing first index second."""
  2090. return span[1] - span[0], -span[0]
  2091. def _get_numeric_value_from_float(value):
  2092. """Converts float (Python) to a NumericValue object with a float value."""
  2093. return NumericValue(float_value=value)
  2094. # Doesn't parse ordinal expressions such as '18th of february 1655'.
  2095. def _parse_date(text):
  2096. """Attempts to format a text as a standard date string (yyyy-mm-dd)."""
  2097. text = re.sub(r"Sept\b", "Sep", text)
  2098. for in_pattern, mask, regex in _PROCESSED_DATE_PATTERNS:
  2099. if not regex.match(text):
  2100. continue
  2101. try:
  2102. date = datetime.datetime.strptime(text, in_pattern).date()
  2103. except ValueError:
  2104. continue
  2105. try:
  2106. return _get_numeric_value_from_date(date, mask)
  2107. except ValueError:
  2108. continue
  2109. return None
  2110. def _parse_number(text):
  2111. """Parses simple cardinal and ordinals numbers."""
  2112. for suffix in _ORDINAL_SUFFIXES:
  2113. if text.endswith(suffix):
  2114. text = text[: -len(suffix)]
  2115. break
  2116. text = text.replace(",", "")
  2117. try:
  2118. value = float(text)
  2119. except ValueError:
  2120. return None
  2121. if math.isnan(value):
  2122. return None
  2123. if value == _INF:
  2124. return None
  2125. return value
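# Illustrative inputs/outputs for the parser above:
# >>> _parse_number("1,000.5")   # thousands separators are stripped
# 1000.5
# >>> _parse_number("3rd")       # ordinal suffix is stripped
# 3.0
# >>> _parse_number("many")      # not a number, returns None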
  2126. def get_all_spans(text, max_ngram_length):
  2127. """
  2128. Split a text into all possible ngrams up to 'max_ngram_length'. Split points are white space and punctuation.
  2129. Args:
  2130. text: Text to split.
  2131. max_ngram_length: maximal ngram length.
  2132. Yields:
  2133. Spans, tuples of begin-end index.
  2134. """
  2135. start_indexes = []
  2136. for index, char in enumerate(text):
  2137. if not char.isalnum():
  2138. continue
  2139. if index == 0 or not text[index - 1].isalnum():
  2140. start_indexes.append(index)
  2141. if index + 1 == len(text) or not text[index + 1].isalnum():
  2142. for start_index in start_indexes[-max_ngram_length:]:
  2143. yield start_index, index + 1
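# Worked example of the span enumeration above:
# >>> list(get_all_spans("July 2008", max_ngram_length=2))
# [(0, 4), (0, 9), (5, 9)]
# i.e. the unigrams "July" and "2008" plus the bigram "July 2008".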
  2144. def normalize_for_match(text):
  2145. return " ".join(text.lower().split())
  2146. def format_text(text):
  2147. """Lowercases and strips punctuation."""
  2148. text = text.lower().strip()
  2149. if text == "n/a" or text == "?" or text == "nan":
  2150. text = EMPTY_TEXT
  2151. text = re.sub(r"[^\w\d]+", " ", text).replace("_", " ")
  2152. text = " ".join(text.split())
  2153. text = text.strip()
  2154. if text:
  2155. return text
  2156. return EMPTY_TEXT
  2157. def parse_text(text):
  2158. """
  2159. Extracts longest number and date spans.
  2160. Args:
  2161. text: text to annotate
  2162. Returns:
  2163. List of longest numeric value spans.
  2164. """
  2165. span_dict = collections.defaultdict(list)
  2166. for match in _NUMBER_PATTERN.finditer(text):
  2167. span_text = text[match.start() : match.end()]
  2168. number = _parse_number(span_text)
  2169. if number is not None:
  2170. span_dict[match.span()].append(_get_numeric_value_from_float(number))
  2171. for begin_index, end_index in get_all_spans(text, max_ngram_length=1):
  2172. if (begin_index, end_index) in span_dict:
  2173. continue
  2174. span_text = text[begin_index:end_index]
  2175. number = _parse_number(span_text)
  2176. if number is not None:
  2177. span_dict[begin_index, end_index].append(_get_numeric_value_from_float(number))
  2178. for number, word in enumerate(_NUMBER_WORDS):
  2179. if span_text == word:
  2180. span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))
  2181. break
  2182. for number, word in enumerate(_ORDINAL_WORDS):
  2183. if span_text == word:
  2184. span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))
  2185. break
  2186. for begin_index, end_index in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE):
  2187. span_text = text[begin_index:end_index]
  2188. date = _parse_date(span_text)
  2189. if date is not None:
  2190. span_dict[begin_index, end_index].append(date)
  2191. spans = sorted(span_dict.items(), key=lambda span_value: _get_span_length_key(span_value[0]), reverse=True)
  2192. selected_spans = []
  2193. for span, value in spans:
  2194. for selected_span, _ in selected_spans:
  2195. if selected_span[0] <= span[0] and span[1] <= selected_span[1]:
  2196. break
  2197. else:
  2198. selected_spans.append((span, value))
  2199. selected_spans.sort(key=lambda span_value: span_value[0][0])
  2200. numeric_value_spans = []
  2201. for span, values in selected_spans:
  2202. numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values))
  2203. return numeric_value_spans
  2204. # Below: all functions from number_annotation_utils.py and 2 functions (namely filter_invalid_unicode
  2205. # and filter_invalid_unicode_from_table) from text_utils.py of the original implementation. URL's:
  2206. # - https://github.com/google-research/tapas/blob/master/tapas/utils/number_annotation_utils.py
  2207. # - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py


_PrimitiveNumericValue = Union[float, Tuple[Optional[float], Optional[float], Optional[float]]]
_SortKeyFn = Callable[[NumericValue], Tuple[float, Ellipsis]]

_DATE_TUPLE_SIZE = 3

EMPTY_TEXT = "EMPTY"

NUMBER_TYPE = "number"
DATE_TYPE = "date"


def _get_value_type(numeric_value):
    if numeric_value.float_value is not None:
        return NUMBER_TYPE
    elif numeric_value.date is not None:
        return DATE_TYPE
    raise ValueError(f"Unknown type: {numeric_value}")


def _get_value_as_primitive_value(numeric_value):
    """Maps a NumericValue proto to a float or tuple of float."""
    if numeric_value.float_value is not None:
        return numeric_value.float_value
    if numeric_value.date is not None:
        date = numeric_value.date
        value_tuple = [None, None, None]
        # All date fields are cast to float to produce a simple primitive value.
        if date.year is not None:
            value_tuple[0] = float(date.year)
        if date.month is not None:
            value_tuple[1] = float(date.month)
        if date.day is not None:
            value_tuple[2] = float(date.day)
        return tuple(value_tuple)
    raise ValueError(f"Unknown type: {numeric_value}")
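
# Illustrative examples (comments only, not executed; assumes the NumericValue
# and Date dataclasses defined earlier in this file):
#   NumericValue(float_value=42.0)               -> 42.0
#   NumericValue(date=Date(year=2008, month=7))  -> (2008.0, 7.0, None)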


def _get_all_types(numeric_values):
    return {_get_value_type(value) for value in numeric_values}


def get_numeric_sort_key_fn(numeric_values):
    """
    Creates a function that can be used as a sort key or to compare the values. Maps to primitive types and finds the
    biggest common subset. Consider the values "05/05/2010" and "August 2007" with corresponding primitive values
    (2010., 5., 5.) and (2007., 8., None). These values can be compared by year and month, so we map them to the
    sequences (2010., 5.) and (2007., 8.). If we added a third value "2006" with primitive value (2006., None, None),
    we could only compare by the year, so we would map to (2010.,), (2007.,) and (2006.,).

    Args:
        numeric_values: Values to compare

    Returns:
        A function that can be used as a sort key function (mapping numeric values to a comparable tuple)

    Raises:
        ValueError if values don't have a common type or are not comparable.
    """
    value_types = _get_all_types(numeric_values)
    if len(value_types) != 1:
        raise ValueError(f"No common value type in {numeric_values}")

    value_type = next(iter(value_types))
    if value_type == NUMBER_TYPE:
        # Primitive values are simple floats, nothing to do here.
        return _get_value_as_primitive_value

    # The type can only be Date at this point which means the primitive type
    # is a float triple.
    valid_indexes = set(range(_DATE_TUPLE_SIZE))

    for numeric_value in numeric_values:
        value = _get_value_as_primitive_value(numeric_value)
        assert isinstance(value, tuple)
        for tuple_index, inner_value in enumerate(value):
            if inner_value is None:
                valid_indexes.discard(tuple_index)

    if not valid_indexes:
        raise ValueError(f"No common value in {numeric_values}")

    def _sort_key_fn(numeric_value):
        value = _get_value_as_primitive_value(numeric_value)
        return tuple(value[index] for index in valid_indexes)

    return _sort_key_fn
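
# Illustrative sketch mirroring the docstring example (comments only, not
# executed; assumes the NumericValue and Date dataclasses defined earlier):
#   values = [NumericValue(date=Date(year=2010, month=5, day=5)),
#             NumericValue(date=Date(year=2007, month=8))]
#   key_fn = get_numeric_sort_key_fn(values)
#   key_fn(values[0])  -> (2010.0, 5.0)
#   key_fn(values[1])  -> (2007.0, 8.0)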


def _consolidate_numeric_values(row_index_to_values, min_consolidation_fraction, debug_info):
    """
    Finds the most common numeric values in a column and returns them.

    Args:
        row_index_to_values:
            For each row index all the values in that cell.
        min_consolidation_fraction:
            Fraction of cells that need to have consolidated value.
        debug_info:
            Additional information only used for logging.

    Returns:
        For each row index the first value that matches the most common value. Rows that don't have a matching value
        are dropped. Empty dictionary if values can't be consolidated.
    """
    type_counts = collections.Counter()
    for numeric_values in row_index_to_values.values():
        type_counts.update(_get_all_types(numeric_values))
    if not type_counts:
        return {}
    max_count = max(type_counts.values())
    if max_count < len(row_index_to_values) * min_consolidation_fraction:
        # logging.log_every_n(logging.INFO, f'Can\'t consolidate types: {debug_info} {row_index_to_values} {max_count}', 100)
        return {}

    valid_types = set()
    for value_type, count in type_counts.items():
        if count == max_count:
            valid_types.add(value_type)
    if len(valid_types) > 1:
        assert DATE_TYPE in valid_types
        max_type = DATE_TYPE
    else:
        max_type = next(iter(valid_types))

    new_row_index_to_value = {}
    for index, values in row_index_to_values.items():
        # Extract the first matching value.
        for value in values:
            if _get_value_type(value) == max_type:
                new_row_index_to_value[index] = value
                break

    return new_row_index_to_value
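
# Illustrative sketch (comments only, not executed): for a three-row column that
# parsed to
#   {0: [NumericValue(float_value=2010.0)],
#    1: [NumericValue(float_value=2012.0), NumericValue(date=Date(year=2012))],
#    2: [NumericValue(float_value=2014.0)]}
# the number type appears in 3 of 3 rows (>= the 0.7 consolidation fraction), so
# the first number of every row is kept:
#   {0: NumericValue(float_value=2010.0),
#    1: NumericValue(float_value=2012.0),
#    2: NumericValue(float_value=2014.0)}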


def _get_numeric_values(text):
    """Parses text and returns numeric values."""
    numeric_spans = parse_text(text)
    return itertools.chain(*(span.values for span in numeric_spans))


def _get_column_values(table, col_index):
    """
    Parses text in a column and returns a dict mapping row_index to values. This is the _get_column_values function
    from number_annotation_utils.py of the original implementation.

    Args:
        table: Pandas dataframe
        col_index: integer, indicating the index of the column to get the numeric values of
    """
    index_to_values = {}
    for row_index, row in table.iterrows():
        text = normalize_for_match(row[col_index].text)
        index_to_values[row_index] = list(_get_numeric_values(text))
    return index_to_values
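
# Note: rows whose cell text contains no parsable number or date map to an empty
# list, e.g. {0: [<values for row 0>], 1: [], ...}; _consolidate_numeric_values
# then decides which of the parsed values (if any) to keep per row.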


def get_numeric_relation(value, other_value, sort_key_fn):
    """Compares two values and returns their relation or None."""
    value = sort_key_fn(value)
    other_value = sort_key_fn(other_value)
    if value == other_value:
        return Relation.EQ
    if value < other_value:
        return Relation.LT
    if value > other_value:
        return Relation.GT
    return None
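
# Illustrative sketch (comments only, not executed): with
# sort_key_fn = get_numeric_sort_key_fn([value, other_value]), a first value
# whose key is smaller than the second's gives Relation.LT, equal keys give
# Relation.EQ, a larger key gives Relation.GT, and anything else falls through
# to None.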


def add_numeric_values_to_question(question):
    """Adds numeric value spans to a question."""
    original_text = question
    question = normalize_for_match(question)
    numeric_spans = parse_text(question)
    return Question(original_text=original_text, text=question, numeric_spans=numeric_spans)


def filter_invalid_unicode(text):
    """Returns an empty string and True if 'text' is invalid unicode (i.e. raw bytes), otherwise the text and False."""
    return ("", True) if isinstance(text, bytes) else (text, False)


def filter_invalid_unicode_from_table(table):
    """
    Removes invalid unicode from a table. Checks whether a table cell text contains an invalid unicode encoding. If
    so, resets the table cell text to an empty string and logs a warning for each invalid cell.

    Args:
        table: table to clean.
    """
    # to do: add table id support
    if not hasattr(table, "table_id"):
        table.table_id = 0

    for row_index, row in table.iterrows():
        for col_index, cell in enumerate(row):
            cell, is_invalid = filter_invalid_unicode(cell)
            if is_invalid:
                # Write the scrubbed (empty) text back so the invalid cell is actually reset.
                table.iloc[row_index, col_index] = cell
                logging.warning(
                    f"Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, "
                    f"col_index: {col_index}",
                )
    for col_index, column in enumerate(table.columns):
        column, is_invalid = filter_invalid_unicode(column)
        if is_invalid:
            logging.warning(f"Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}")


def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=None):
    """
    Parses text in the table column-wise and adds the consolidated values. Consolidation refers to finding values
    with a common type (date or number).

    Args:
        table:
            Table to annotate.
        min_consolidation_fraction:
            Fraction of cells in a column that need to have consolidated value.
        debug_info:
            Additional information used for logging.
    """
    table = table.copy()
    # First, filter table on invalid unicode
    filter_invalid_unicode_from_table(table)

    # Second, replace cell values by Cell objects
    for row_index, row in table.iterrows():
        for col_index, cell in enumerate(row):
            table.iloc[row_index, col_index] = Cell(text=cell)

    # Third, add numeric_value attributes to these Cell objects
    for col_index, column in enumerate(table.columns):
        column_values = _consolidate_numeric_values(
            _get_column_values(table, col_index),
            min_consolidation_fraction=min_consolidation_fraction,
            debug_info=(debug_info, column),
        )

        for row_index, numeric_value in column_values.items():
            table.iloc[row_index, col_index].numeric_value = numeric_value

    return table
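
# Illustrative usage sketch (comments only, not executed; assumes a pandas
# DataFrame of strings with a default RangeIndex):
#   import pandas as pd
#   table = pd.DataFrame({"Rank": ["1", "2", "3"], "Date": ["May 2010", "June 2011", "July 2012"]})
#   table = add_numeric_table_values(table)
# Every body cell is then a Cell object, and cells in columns that consolidated
# to a common type additionally carry a .numeric_value attribute, which the
# tokenizer later uses to compute column ranks and numeric relations.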