tokenization_layoutxlm.py 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186
  1. # coding=utf-8
  2. # Copyright 2021 The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License
  15. """Tokenization classes for LayoutXLM model."""
  16. import os
  17. from shutil import copyfile
  18. from typing import Any, Dict, List, Optional, Tuple, Union
  19. import sentencepiece as spm
  20. from ...tokenization_utils import AddedToken, PreTrainedTokenizer
  21. from ...tokenization_utils_base import (
  22. BatchEncoding,
  23. EncodedInput,
  24. PreTokenizedInput,
  25. TextInput,
  26. TextInputPair,
  27. TruncationStrategy,
  28. )
  29. from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
  30. from ..xlm_roberta.tokenization_xlm_roberta import (
  31. SPIECE_UNDERLINE,
  32. VOCAB_FILES_NAMES,
  33. )
  34. logger = logging.get_logger(__name__)
# Shared keyword-argument documentation for the encoding entry points of
# `LayoutXLMTokenizer`; appended to their docstrings via `add_end_docstrings`.
LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
        add_special_tokens (`bool`, *optional*, defaults to `True`):
            Whether or not to encode the sequences with the special tokens relative to their model.
        padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of
              sequences (or a batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only
              truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only
              truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.
            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        stride (`int`, *optional*, defaults to 0):
            If set to a number along with `max_length`, the overflowing tokens returned when
            `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
            returned to provide some overlap between truncated and overflowing sequences. The value of this
            argument defines the number of overlapping tokens.
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
            the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
        return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_token_type_ids (`bool`, *optional*):
            Whether to return token type IDs. If left to the default, will return the token type IDs according to
            the specific tokenizer's default, defined by the `return_outputs` attribute.
            [What are token type IDs?](../glossary#token-type-ids)
        return_attention_mask (`bool`, *optional*):
            Whether to return the attention mask. If left to the default, will return the attention mask according
            to the specific tokenizer's default, defined by the `return_outputs` attribute.
            [What are attention masks?](../glossary#attention-mask)
        return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
            of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
            of returning overflowing tokens.
        return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
            Whether or not to return special tokens mask information.
        return_offsets_mapping (`bool`, *optional*, defaults to `False`):
            Whether or not to return `(char_start, char_end)` for each token.
            This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
            Python's tokenizer, this method will raise `NotImplementedError`.
        return_length (`bool`, *optional*, defaults to `False`):
            Whether or not to return the lengths of the encoded inputs.
        verbose (`bool`, *optional*, defaults to `True`):
            Whether or not to print more information and warnings.
        **kwargs: passed to the `self.tokenize()` method
        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model.
              [What are input IDs?](../glossary#input-ids)
            - **bbox** -- List of bounding boxes to be fed to a model.
            - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
              if *"token_type_ids"* is in `self.model_input_names`).
              [What are token type IDs?](../glossary#token-type-ids)
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
              [What are attention masks?](../glossary#attention-mask)
            - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
            - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
              regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
            - **length** -- The length of the inputs (when `return_length=True`).
"""
  121. class LayoutXLMTokenizer(PreTrainedTokenizer):
  122. """
  123. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
  124. [SentencePiece](https://github.com/google/sentencepiece).
  125. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
  126. this superclass for more information regarding those methods.
  127. Args:
  128. vocab_file (`str`):
  129. Path to the vocabulary file.
  130. bos_token (`str`, *optional*, defaults to `"<s>"`):
  131. The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
  132. <Tip>
  133. When building a sequence using special tokens, this is not the token that is used for the beginning of
  134. sequence. The token used is the `cls_token`.
  135. </Tip>
  136. eos_token (`str`, *optional*, defaults to `"</s>"`):
  137. The end of sequence token.
  138. <Tip>
  139. When building a sequence using special tokens, this is not the token that is used for the end of sequence.
  140. The token used is the `sep_token`.
  141. </Tip>
  142. sep_token (`str`, *optional*, defaults to `"</s>"`):
  143. The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
  144. sequence classification or for a text and a question for question answering. It is also used as the last
  145. token of a sequence built with special tokens.
  146. cls_token (`str`, *optional*, defaults to `"<s>"`):
  147. The classifier token which is used when doing sequence classification (classification of the whole sequence
  148. instead of per-token classification). It is the first token of the sequence when built with special tokens.
  149. unk_token (`str`, *optional*, defaults to `"<unk>"`):
  150. The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
  151. token instead.
  152. pad_token (`str`, *optional*, defaults to `"<pad>"`):
  153. The token used for padding, for example when batching sequences of different lengths.
  154. mask_token (`str`, *optional*, defaults to `"<mask>"`):
  155. The token used for masking values. This is the token used when training this model with masked language
  156. modeling. This is the token which the model will try to predict.
  157. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
  158. The bounding box to use for the special [CLS] token.
  159. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
  160. The bounding box to use for the special [SEP] token.
  161. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
  162. The bounding box to use for the special [PAD] token.
  163. pad_token_label (`int`, *optional*, defaults to -100):
  164. The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
  165. CrossEntropyLoss.
  166. only_label_first_subword (`bool`, *optional*, defaults to `True`):
  167. Whether or not to only label the first subword, in case word labels are provided.
  168. sp_model_kwargs (`dict`, *optional*):
  169. Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
  170. SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
  171. to set:
  172. - `enable_sampling`: Enable subword regularization.
  173. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
  174. - `nbest_size = {0,1}`: No sampling is performed.
  175. - `nbest_size > 1`: samples from the nbest_size results.
  176. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
  177. using forward-filtering-and-backward-sampling algorithm.
  178. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
  179. BPE-dropout.
  180. Attributes:
  181. sp_model (`SentencePieceProcessor`):
  182. The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
  183. """
  184. vocab_files_names = VOCAB_FILES_NAMES
  185. model_input_names = ["input_ids", "attention_mask"]
  186. def __init__(
  187. self,
  188. vocab_file,
  189. bos_token="<s>",
  190. eos_token="</s>",
  191. sep_token="</s>",
  192. cls_token="<s>",
  193. unk_token="<unk>",
  194. pad_token="<pad>",
  195. mask_token="<mask>",
  196. cls_token_box=[0, 0, 0, 0],
  197. sep_token_box=[1000, 1000, 1000, 1000],
  198. pad_token_box=[0, 0, 0, 0],
  199. pad_token_label=-100,
  200. only_label_first_subword=True,
  201. sp_model_kwargs: Optional[Dict[str, Any]] = None,
  202. **kwargs,
  203. ) -> None:
  204. # Mask token behave like a normal word, i.e. include the space before it
  205. mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
  206. self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
  207. self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
  208. self.sp_model.Load(str(vocab_file))
  209. self.vocab_file = vocab_file
  210. # Original fairseq vocab and spm vocab must be "aligned":
  211. # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
  212. # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
  213. # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
  214. # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
  215. # Mimic fairseq token-to-id alignment for the first 4 token
  216. self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
  217. # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
  218. self.fairseq_offset = 1
  219. self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
  220. self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
  221. # additional properties
  222. self.cls_token_box = cls_token_box
  223. self.sep_token_box = sep_token_box
  224. self.pad_token_box = pad_token_box
  225. self.pad_token_label = pad_token_label
  226. self.only_label_first_subword = only_label_first_subword
  227. super().__init__(
  228. bos_token=bos_token,
  229. eos_token=eos_token,
  230. unk_token=unk_token,
  231. sep_token=sep_token,
  232. cls_token=cls_token,
  233. pad_token=pad_token,
  234. mask_token=mask_token,
  235. cls_token_box=cls_token_box,
  236. sep_token_box=sep_token_box,
  237. pad_token_box=pad_token_box,
  238. pad_token_label=pad_token_label,
  239. only_label_first_subword=only_label_first_subword,
  240. sp_model_kwargs=self.sp_model_kwargs,
  241. **kwargs,
  242. )
  243. def __getstate__(self):
  244. state = self.__dict__.copy()
  245. state["sp_model"] = None
  246. state["sp_model_proto"] = self.sp_model.serialized_model_proto()
  247. return state
  248. def __setstate__(self, d):
  249. self.__dict__ = d
  250. # for backward compatibility
  251. if not hasattr(self, "sp_model_kwargs"):
  252. self.sp_model_kwargs = {}
  253. self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
  254. self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
  255. def build_inputs_with_special_tokens(
  256. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
  257. ) -> List[int]:
  258. """
  259. Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
  260. adding special tokens. An XLM-RoBERTa sequence has the following format:
  261. - single sequence: `<s> X </s>`
  262. - pair of sequences: `<s> A </s></s> B </s>`
  263. Args:
  264. token_ids_0 (`List[int]`):
  265. List of IDs to which the special tokens will be added.
  266. token_ids_1 (`List[int]`, *optional*):
  267. Optional second list of IDs for sequence pairs.
  268. Returns:
  269. `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
  270. """
  271. if token_ids_1 is None:
  272. return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
  273. cls = [self.cls_token_id]
  274. sep = [self.sep_token_id]
  275. return cls + token_ids_0 + sep + sep + token_ids_1 + sep
  276. def get_special_tokens_mask(
  277. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
  278. ) -> List[int]:
  279. """
  280. Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
  281. special tokens using the tokenizer `prepare_for_model` method.
  282. Args:
  283. token_ids_0 (`List[int]`):
  284. List of IDs.
  285. token_ids_1 (`List[int]`, *optional*):
  286. Optional second list of IDs for sequence pairs.
  287. already_has_special_tokens (`bool`, *optional*, defaults to `False`):
  288. Whether or not the token list is already formatted with special tokens for the model.
  289. Returns:
  290. `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
  291. """
  292. if already_has_special_tokens:
  293. return super().get_special_tokens_mask(
  294. token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
  295. )
  296. if token_ids_1 is None:
  297. return [1] + ([0] * len(token_ids_0)) + [1]
  298. return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
  299. def create_token_type_ids_from_sequences(
  300. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
  301. ) -> List[int]:
  302. """
  303. Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
  304. not make use of token type ids, therefore a list of zeros is returned.
  305. Args:
  306. token_ids_0 (`List[int]`):
  307. List of IDs.
  308. token_ids_1 (`List[int]`, *optional*):
  309. Optional second list of IDs for sequence pairs.
  310. Returns:
  311. `List[int]`: List of zeros.
  312. """
  313. sep = [self.sep_token_id]
  314. cls = [self.cls_token_id]
  315. if token_ids_1 is None:
  316. return len(cls + token_ids_0 + sep) * [0]
  317. return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
  318. @property
  319. def vocab_size(self):
  320. return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
  321. def get_vocab(self):
  322. vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
  323. vocab.update(self.added_tokens_encoder)
  324. return vocab
  325. def _tokenize(self, text: str) -> List[str]:
  326. return self.sp_model.encode(text, out_type=str)
  327. def _convert_token_to_id(self, token):
  328. """Converts a token (str) in an id using the vocab."""
  329. if token in self.fairseq_tokens_to_ids:
  330. return self.fairseq_tokens_to_ids[token]
  331. spm_id = self.sp_model.PieceToId(token)
  332. # Need to return unknown token if the SP model returned 0
  333. return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
  334. def _convert_id_to_token(self, index):
  335. """Converts an index (integer) in a token (str) using the vocab."""
  336. if index in self.fairseq_ids_to_tokens:
  337. return self.fairseq_ids_to_tokens[index]
  338. return self.sp_model.IdToPiece(index - self.fairseq_offset)
  339. def convert_tokens_to_string(self, tokens):
  340. """Converts a sequence of tokens (strings for sub-words) in a single string."""
  341. out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
  342. return out_string
  343. def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
  344. if not os.path.isdir(save_directory):
  345. logger.error(f"Vocabulary path ({save_directory}) should be a directory")
  346. return
  347. out_vocab_file = os.path.join(
  348. save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
  349. )
  350. if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
  351. copyfile(self.vocab_file, out_vocab_file)
  352. elif not os.path.isfile(self.vocab_file):
  353. with open(out_vocab_file, "wb") as fi:
  354. content_spiece_model = self.sp_model.serialized_model_proto()
  355. fi.write(content_spiece_model)
  356. return (out_vocab_file,)
    @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Optional[Union[List[List[int]], List[List[List[int]]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        # NOTE(review): annotation corrected from Optional[bool] — this carries the
        # padding side as a string ("left"/"right"), not a flag.
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "words must of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        # With a pair, `text` is a plain question so the batch case is any list/tuple;
        # without a pair, a batch is a list of lists of words.
        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        # The boxes always align with the words, which live in `text_pair` when a pair is given.
        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: Optional[bool] = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        # NOTE(review): annotation corrected from Optional[bool] — this carries the
        # padding side as a string ("left"/"right"), not a flag.
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Encode a batch of (possibly paired) pretokenized inputs with their bounding boxes
        and optional word labels, delegating all padding/truncation/special-token work to
        `_batch_prepare_for_model`.
        """
        # Offset mapping requires the Rust-backed (fast) tokenizer; this slow tokenizer cannot provide it.
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
            )

        batch_outputs = self._batch_prepare_for_model(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return BatchEncoding(batch_outputs)
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
    self,
    batch_text_or_text_pairs,
    is_pair: bool = None,
    boxes: Optional[List[List[List[int]]]] = None,
    word_labels: Optional[List[List[int]]] = None,
    add_special_tokens: bool = True,
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
    truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
    max_length: Optional[int] = None,
    stride: int = 0,
    pad_to_multiple_of: Optional[int] = None,
    padding_side: Optional[str] = None,
    return_tensors: Optional[str] = None,
    return_token_type_ids: Optional[bool] = None,
    return_attention_mask: Optional[bool] = None,
    return_overflowing_tokens: bool = False,
    return_special_tokens_mask: bool = False,
    return_length: bool = False,
    verbose: bool = True,
) -> BatchEncoding:
    """
    Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
    adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
    manages a moving window (with user defined stride) for overflowing tokens.

    Each example is first prepared individually via `prepare_for_model` with padding disabled; padding (and
    tensor conversion) is applied once over the whole batch at the end, so that length-dependent strategies
    such as padding-to-longest can see every example.

    Args:
        batch_ids_pairs: list of tokenized input ids or input ids pairs
    """
    batch_outputs = {}
    # `boxes` is iterated in lockstep with the examples; `word_labels` is indexed by position.
    for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
        batch_text_or_text_pair, boxes_example = example
        outputs = self.prepare_for_model(
            batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
            batch_text_or_text_pair[1] if is_pair else None,
            boxes_example,
            word_labels=word_labels[idx] if word_labels is not None else None,
            add_special_tokens=add_special_tokens,
            padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=None,  # we pad in batch afterward
            padding_side=None,  # we pad in batch afterward
            return_attention_mask=False,  # we pad in batch afterward
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=None,  # We convert the whole batch to tensors at the end
            prepend_batch_axis=False,
            verbose=verbose,
        )

        # Transpose per-example dicts into a dict of lists keyed by output name.
        for key, value in outputs.items():
            if key not in batch_outputs:
                batch_outputs[key] = []
            batch_outputs[key].append(value)

    # Batch-level padding (also creates the attention mask when requested).
    batch_outputs = self.pad(
        batch_outputs,
        padding=padding_strategy.value,
        max_length=max_length,
        pad_to_multiple_of=pad_to_multiple_of,
        padding_side=padding_side,
        return_attention_mask=return_attention_mask,
    )

    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

    return batch_outputs
  623. def _encode_plus(
  624. self,
  625. text: Union[TextInput, PreTokenizedInput],
  626. text_pair: Optional[PreTokenizedInput] = None,
  627. boxes: Optional[List[List[int]]] = None,
  628. word_labels: Optional[List[int]] = None,
  629. add_special_tokens: bool = True,
  630. padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
  631. truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
  632. max_length: Optional[int] = None,
  633. stride: int = 0,
  634. pad_to_multiple_of: Optional[int] = None,
  635. padding_side: Optional[bool] = None,
  636. return_tensors: Optional[Union[str, TensorType]] = None,
  637. return_token_type_ids: Optional[bool] = None,
  638. return_attention_mask: Optional[bool] = None,
  639. return_overflowing_tokens: bool = False,
  640. return_special_tokens_mask: bool = False,
  641. return_offsets_mapping: bool = False,
  642. return_length: bool = False,
  643. verbose: bool = True,
  644. **kwargs,
  645. ) -> BatchEncoding:
  646. if return_offsets_mapping:
  647. raise NotImplementedError(
  648. "return_offset_mapping is not available when using Python tokenizers. "
  649. "To use this feature, change your tokenizer to one deriving from "
  650. "transformers.PreTrainedTokenizerFast. "
  651. "More information on available tokenizers at "
  652. "https://github.com/huggingface/transformers/pull/2674"
  653. )
  654. return self.prepare_for_model(
  655. text=text,
  656. text_pair=text_pair,
  657. boxes=boxes,
  658. word_labels=word_labels,
  659. add_special_tokens=add_special_tokens,
  660. padding=padding_strategy.value,
  661. truncation=truncation_strategy.value,
  662. max_length=max_length,
  663. stride=stride,
  664. pad_to_multiple_of=pad_to_multiple_of,
  665. padding_side=padding_side,
  666. return_tensors=return_tensors,
  667. prepend_batch_axis=True,
  668. return_attention_mask=return_attention_mask,
  669. return_token_type_ids=return_token_type_ids,
  670. return_overflowing_tokens=return_overflowing_tokens,
  671. return_special_tokens_mask=return_special_tokens_mask,
  672. return_length=return_length,
  673. verbose=verbose,
  674. )
@add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
def prepare_for_model(
    self,
    text: Union[TextInput, PreTokenizedInput],
    text_pair: Optional[PreTokenizedInput] = None,
    boxes: Optional[List[List[int]]] = None,
    word_labels: Optional[List[int]] = None,
    add_special_tokens: bool = True,
    padding: Union[bool, str, PaddingStrategy] = False,
    truncation: Union[bool, str, TruncationStrategy] = None,
    max_length: Optional[int] = None,
    stride: int = 0,
    pad_to_multiple_of: Optional[int] = None,
    padding_side: Optional[str] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    return_token_type_ids: Optional[bool] = None,
    return_attention_mask: Optional[bool] = None,
    return_overflowing_tokens: bool = False,
    return_special_tokens_mask: bool = False,
    return_offsets_mapping: bool = False,
    return_length: bool = False,
    verbose: bool = True,
    prepend_batch_axis: bool = False,
    **kwargs,
) -> BatchEncoding:
    """
    Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
    truncates sequences if overflowing while taking into account the special tokens and manages a moving window
    (with user defined stride) for overflowing tokens.

    Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
    token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
    labeled with -100, such that they will be ignored by the loss function.

    Three input configurations are handled (see the inline CASE comments below):
    classification/inference on words only, token classification training (words + labels),
    and visual question answering (question + words as a pair).

    Args:
        text (`str`, `List[str]`, `List[List[str]]`):
            The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
        text_pair (`List[str]` or `List[int]`, *optional*):
            Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
            list of list of strings (words of a batch of examples).
    """
    # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
        padding=padding,
        truncation=truncation,
        max_length=max_length,
        pad_to_multiple_of=pad_to_multiple_of,
        verbose=verbose,
        **kwargs,
    )

    # Tokenize words into sub-word tokens, replicating each word's box (and label)
    # once per sub-word token so tokens and boxes stay aligned.
    tokens = []
    pair_tokens = []
    token_boxes = []
    pair_token_boxes = []
    labels = []

    if text_pair is None:
        if word_labels is None:
            # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
            for word, box in zip(text, boxes):
                if len(word) < 1:  # skip empty words
                    continue
                word_tokens = self.tokenize(word)
                tokens.extend(word_tokens)
                token_boxes.extend([box] * len(word_tokens))
        else:
            # CASE 2: token classification (training)
            for word, box, label in zip(text, boxes, word_labels):
                if len(word) < 1:  # skip empty words
                    continue
                word_tokens = self.tokenize(word)
                tokens.extend(word_tokens)
                token_boxes.extend([box] * len(word_tokens))
                if self.only_label_first_subword:
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
                else:
                    labels.extend([label] * len(word_tokens))
    else:
        # CASE 3: document visual question answering (inference)
        # text = question
        # text_pair = words
        # The question tokens get the padding box; only the words carry real boxes.
        tokens = self.tokenize(text)
        token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]

        for word, box in zip(text_pair, boxes):
            if len(word) < 1:  # skip empty words
                continue
            word_tokens = self.tokenize(word)
            pair_tokens.extend(word_tokens)
            pair_token_boxes.extend([box] * len(word_tokens))

    # Create ids + pair_ids
    ids = self.convert_tokens_to_ids(tokens)
    pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None

    # Compute the total size of the returned encodings
    pair = bool(pair_ids is not None)
    len_ids = len(ids)
    len_pair_ids = len(pair_ids) if pair else 0
    total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

    # Truncation: Handle max sequence length
    overflowing_tokens = []
    overflowing_token_boxes = []
    overflowing_labels = []
    if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
        (
            ids,
            token_boxes,
            pair_ids,
            pair_token_boxes,
            labels,
            overflowing_tokens,
            overflowing_token_boxes,
            overflowing_labels,
        ) = self.truncate_sequences(
            ids,
            token_boxes,
            pair_ids=pair_ids,
            pair_token_boxes=pair_token_boxes,
            labels=labels,
            num_tokens_to_remove=total_len - max_length,
            truncation_strategy=truncation_strategy,
            stride=stride,
        )

    if return_token_type_ids and not add_special_tokens:
        raise ValueError(
            "Asking to return token_type_ids while setting add_special_tokens to False "
            "results in an undefined behavior. Please set add_special_tokens to True or "
            "set return_token_type_ids to None."
        )

    # Load from model defaults
    if return_token_type_ids is None:
        return_token_type_ids = "token_type_ids" in self.model_input_names
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names

    encoded_inputs = {}

    if return_overflowing_tokens:
        encoded_inputs["overflowing_tokens"] = overflowing_tokens
        encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
        encoded_inputs["overflowing_labels"] = overflowing_labels
        encoded_inputs["num_truncated_tokens"] = total_len - max_length

    # Add special tokens; boxes get matching CLS/SEP placeholder boxes and
    # labels get padding label ids so all sequences stay the same length.
    if add_special_tokens:
        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
        token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
        token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
        if pair_token_boxes:
            pair_token_boxes = pair_token_boxes + [self.sep_token_box]
        if labels:
            labels = [self.pad_token_label] + labels + [self.pad_token_label]
    else:
        sequence = ids + pair_ids if pair else ids
        token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])

    # Build output dictionary
    encoded_inputs["input_ids"] = sequence
    encoded_inputs["bbox"] = token_boxes + pair_token_boxes
    if return_token_type_ids:
        encoded_inputs["token_type_ids"] = token_type_ids
    if return_special_tokens_mask:
        if add_special_tokens:
            encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
        else:
            encoded_inputs["special_tokens_mask"] = [0] * len(sequence)

    if labels:
        encoded_inputs["labels"] = labels

    # Check lengths
    self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)

    # Padding (also needed when only the attention mask is requested)
    if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
        encoded_inputs = self.pad(
            encoded_inputs,
            max_length=max_length,
            padding=padding_strategy.value,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )

    if return_length:
        encoded_inputs["length"] = len(encoded_inputs["input_ids"])

    batch_outputs = BatchEncoding(
        encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
    )

    return batch_outputs
def truncate_sequences(
    self,
    ids: List[int],
    token_boxes: List[List[int]],
    pair_ids: Optional[List[int]] = None,
    pair_token_boxes: Optional[List[List[int]]] = None,
    labels: Optional[List[int]] = None,
    num_tokens_to_remove: int = 0,
    truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
    stride: int = 0,
) -> Tuple[
    List[int],
    List[List[int]],
    Optional[List[int]],
    Optional[List[List[int]]],
    Optional[List[int]],
    List[int],
    List[List[int]],
    List[int],
]:
    """
    Truncates a sequence pair in-place following the strategy.

    Args:
        ids (`List[int]`):
            Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
            `convert_tokens_to_ids` methods.
        token_boxes (`List[List[int]]`):
            Bounding boxes of the first sequence.
        pair_ids (`List[int]`, *optional*):
            Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
            and `convert_tokens_to_ids` methods.
        pair_token_boxes (`List[List[int]]`, *optional*):
            Bounding boxes of the second sequence.
        labels (`List[int]`, *optional*):
            Labels of the first sequence (for token classification tasks).
        num_tokens_to_remove (`int`, *optional*, defaults to 0):
            Number of tokens to remove using the truncation strategy.
        truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
            The strategy to follow for truncation. Can be:

            - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only
              truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only
              truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
              than the model maximum admissible input size).
        stride (`int`, *optional*, defaults to 0):
            If set to a positive number, the overflowing tokens returned will contain some tokens from the main
            sequence returned. The value of this argument defines the number of additional tokens.

    Returns:
        An 8-tuple of the truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, followed
        by the overflowing tokens, their boxes and their labels.
    """
    if num_tokens_to_remove <= 0:
        # Nothing to do: return inputs unchanged and empty overflow lists.
        return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []

    if not isinstance(truncation_strategy, TruncationStrategy):
        truncation_strategy = TruncationStrategy(truncation_strategy)

    overflowing_tokens = []
    overflowing_token_boxes = []
    overflowing_labels = []
    if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
        # Remove one token at a time from whichever sequence is currently longer.
        for _ in range(num_tokens_to_remove):
            if pair_ids is None or len(ids) > len(pair_ids):
                # On the first removal, keep a stride-sized window of context
                # in the overflow; afterwards, one token per removal.
                if not overflowing_tokens:
                    window_len = min(len(ids), stride + 1)
                else:
                    window_len = 1
                overflowing_tokens.extend(ids[-window_len:])
                overflowing_token_boxes.extend(token_boxes[-window_len:])
                overflowing_labels.extend(labels[-window_len:])
                ids = ids[:-1]
                token_boxes = token_boxes[:-1]
                labels = labels[:-1]
            else:
                if not overflowing_tokens:
                    window_len = min(len(pair_ids), stride + 1)
                else:
                    window_len = 1
                overflowing_tokens.extend(pair_ids[-window_len:])
                overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
                pair_ids = pair_ids[:-1]
                pair_token_boxes = pair_token_boxes[:-1]
    elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
        if len(ids) > num_tokens_to_remove:
            # Overflow window includes `stride` extra context tokens.
            window_len = min(len(ids), stride + num_tokens_to_remove)
            overflowing_tokens = ids[-window_len:]
            overflowing_token_boxes = token_boxes[-window_len:]
            overflowing_labels = labels[-window_len:]
            ids = ids[:-num_tokens_to_remove]
            token_boxes = token_boxes[:-num_tokens_to_remove]
            labels = labels[:-num_tokens_to_remove]
        else:
            logger.error(
                f"We need to remove {num_tokens_to_remove} to truncate the input "
                f"but the first sequence has a length {len(ids)}. "
                f"Please select another truncation strategy than {truncation_strategy}, "
                "for instance 'longest_first' or 'only_second'."
            )
    elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
        if len(pair_ids) > num_tokens_to_remove:
            window_len = min(len(pair_ids), stride + num_tokens_to_remove)
            overflowing_tokens = pair_ids[-window_len:]
            overflowing_token_boxes = pair_token_boxes[-window_len:]
            pair_ids = pair_ids[:-num_tokens_to_remove]
            pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
        else:
            logger.error(
                f"We need to remove {num_tokens_to_remove} to truncate the input "
                f"but the second sequence has a length {len(pair_ids)}. "
                f"Please select another truncation strategy than {truncation_strategy}, "
                "for instance 'longest_first' or 'only_first'."
            )

    return (
        ids,
        token_boxes,
        pair_ids,
        pair_token_boxes,
        labels,
        overflowing_tokens,
        overflowing_token_boxes,
        overflowing_labels,
    )
  971. def _pad(
  972. self,
  973. encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
  974. max_length: Optional[int] = None,
  975. padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
  976. pad_to_multiple_of: Optional[int] = None,
  977. padding_side: Optional[bool] = None,
  978. return_attention_mask: Optional[bool] = None,
  979. ) -> dict:
  980. """
  981. Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
  982. Args:
  983. encoded_inputs:
  984. Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
  985. max_length: maximum length of the returned list and optionally padding length (see below).
  986. Will truncate by taking into account the special tokens.
  987. padding_strategy: PaddingStrategy to use for padding.
  988. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
  989. - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
  990. - PaddingStrategy.DO_NOT_PAD: Do not pad
  991. The tokenizer padding sides are defined in self.padding_side:
  992. - 'left': pads on the left of the sequences
  993. - 'right': pads on the right of the sequences
  994. pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
  995. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
  996. `>= 7.5` (Volta).
  997. padding_side (`str`, *optional*):
  998. The side on which the model should have padding applied. Should be selected between ['right', 'left'].
  999. Default value is picked from the class attribute of the same name.
  1000. return_attention_mask:
  1001. (optional) Set to False to avoid returning attention mask (default: set to model specifics)
  1002. """
  1003. # Load from model defaults
  1004. if return_attention_mask is None:
  1005. return_attention_mask = "attention_mask" in self.model_input_names
  1006. required_input = encoded_inputs[self.model_input_names[0]]
  1007. if padding_strategy == PaddingStrategy.LONGEST:
  1008. max_length = len(required_input)
  1009. if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
  1010. max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
  1011. needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
  1012. # Initialize attention mask if not present.
  1013. if return_attention_mask and "attention_mask" not in encoded_inputs:
  1014. encoded_inputs["attention_mask"] = [1] * len(required_input)
  1015. if needs_to_be_padded:
  1016. difference = max_length - len(required_input)
  1017. padding_side = padding_side if padding_side is not None else self.padding_side
  1018. if padding_side == "right":
  1019. if return_attention_mask:
  1020. encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
  1021. if "token_type_ids" in encoded_inputs:
  1022. encoded_inputs["token_type_ids"] = (
  1023. encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
  1024. )
  1025. if "bbox" in encoded_inputs:
  1026. encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
  1027. if "labels" in encoded_inputs:
  1028. encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
  1029. if "special_tokens_mask" in encoded_inputs:
  1030. encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
  1031. encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
  1032. elif padding_side == "left":
  1033. if return_attention_mask:
  1034. encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
  1035. if "token_type_ids" in encoded_inputs:
  1036. encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
  1037. "token_type_ids"
  1038. ]
  1039. if "bbox" in encoded_inputs:
  1040. encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
  1041. if "labels" in encoded_inputs:
  1042. encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
  1043. if "special_tokens_mask" in encoded_inputs:
  1044. encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
  1045. encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
  1046. else:
  1047. raise ValueError("Invalid padding strategy:" + str(padding_side))
  1048. return encoded_inputs