# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast tokenization class for LayoutLMv3. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
and _encode_plus, in which the Rust tokenizer is used.
"""

import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import (
    BatchEncoding,
    EncodedInput,
    PaddingStrategy,
    PreTokenizedInput,
    TensorType,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import add_end_docstrings, logging
from .tokenization_layoutlmv3 import (
    LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING,
    LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
    LayoutLMv3Tokenizer,
)


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
        cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [CLS] token.
        sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [SEP] token.
        pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [PAD] token.
        pad_token_label (`int`, *optional*, defaults to -100):
            The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
            CrossEntropyLoss.
        only_label_first_subword (`bool`, *optional*, defaults to `True`):
            Whether or not to only label the first subword, in case word labels are provided.
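
    Example (illustrative; assumes a LayoutLMv3 checkpoint such as `microsoft/layoutlmv3-base` is available):

    ```python
    >>> from transformers import LayoutLMv3TokenizerFast

    >>> tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
    >>> words = ["hello", "world"]
    >>> boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # one 0-1000 normalized box per word
    >>> word_labels = [0, 1]  # optional word-level labels for token classification
    >>> encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
    >>> list(encoding.keys())  # includes "bbox" (token-level boxes) and "labels"
    ```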
  97. """
  98. vocab_files_names = VOCAB_FILES_NAMES
  99. model_input_names = ["input_ids", "attention_mask"]
  100. slow_tokenizer_class = LayoutLMv3Tokenizer
  101. def __init__(
  102. self,
  103. vocab_file=None,
  104. merges_file=None,
  105. tokenizer_file=None,
  106. errors="replace",
  107. bos_token="<s>",
  108. eos_token="</s>",
  109. sep_token="</s>",
  110. cls_token="<s>",
  111. unk_token="<unk>",
  112. pad_token="<pad>",
  113. mask_token="<mask>",
  114. add_prefix_space=True,
  115. trim_offsets=True,
  116. cls_token_box=[0, 0, 0, 0],
  117. sep_token_box=[0, 0, 0, 0],
  118. pad_token_box=[0, 0, 0, 0],
  119. pad_token_label=-100,
  120. only_label_first_subword=True,
  121. **kwargs,
  122. ):
  123. super().__init__(
  124. vocab_file,
  125. merges_file,
  126. tokenizer_file=tokenizer_file,
  127. errors=errors,
  128. bos_token=bos_token,
  129. eos_token=eos_token,
  130. sep_token=sep_token,
  131. cls_token=cls_token,
  132. unk_token=unk_token,
  133. pad_token=pad_token,
  134. mask_token=mask_token,
  135. add_prefix_space=add_prefix_space,
  136. trim_offsets=trim_offsets,
  137. cls_token_box=cls_token_box,
  138. sep_token_box=sep_token_box,
  139. pad_token_box=pad_token_box,
  140. pad_token_label=pad_token_label,
  141. only_label_first_subword=only_label_first_subword,
  142. **kwargs,
  143. )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the post-processor class
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

        # additional properties
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword

    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.__call__
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
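
        Example (illustrative; assumes a LayoutLMv3 checkpoint such as `microsoft/layoutlmv3-base`):

        ```python
        >>> from transformers import LayoutLMv3TokenizerFast

        >>> tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
        >>> # document question answering: `text` is the question, `text_pair` holds the document words
        >>> question = "What is the total amount?"
        >>> words = ["Total", ":", "12.50"]
        >>> boxes = [[100, 100, 160, 120], [161, 100, 165, 120], [170, 100, 220, 120]]  # one 0-1000 box per word
        >>> encoding = tokenizer(question, text_pair=words, boxes=boxes, return_tensors="pt")
        ```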
  216. """
  217. # Input type checking for clearer error
  218. def _is_valid_text_input(t):
  219. if isinstance(t, str):
  220. # Strings are fine
  221. return True
  222. elif isinstance(t, (list, tuple)):
  223. # List are fine as long as they are...
  224. if len(t) == 0:
  225. # ... empty
  226. return True
  227. elif isinstance(t[0], str):
  228. # ... list of strings
  229. return True
  230. elif isinstance(t[0], (list, tuple)):
  231. # ... list with an empty list or with a list of strings
  232. return len(t[0]) == 0 or isinstance(t[0][0], str)
  233. else:
  234. return False
  235. else:
  236. return False
  237. if text_pair is not None:
  238. # in case text + text_pair are provided, text = questions, text_pair = words
  239. if not _is_valid_text_input(text):
  240. raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
  241. if not isinstance(text_pair, (list, tuple)):
  242. raise ValueError(
  243. "Words must be of type `List[str]` (single pretokenized example), "
  244. "or `List[List[str]]` (batch of pretokenized examples)."
  245. )
  246. else:
  247. # in case only text is provided => must be words
  248. if not isinstance(text, (list, tuple)):
  249. raise ValueError(
  250. "Words must be of type `List[str]` (single pretokenized example), "
  251. "or `List[List[str]]` (batch of pretokenized examples)."
  252. )
  253. if text_pair is not None:
  254. is_batched = isinstance(text, (list, tuple))
  255. else:
  256. is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
  257. words = text if text_pair is None else text_pair
  258. if boxes is None:
  259. raise ValueError("You must provide corresponding bounding boxes")
  260. if is_batched:
  261. if len(words) != len(boxes):
  262. raise ValueError("You must provide words and boxes for an equal amount of examples")
  263. for words_example, boxes_example in zip(words, boxes):
  264. if len(words_example) != len(boxes_example):
  265. raise ValueError("You must provide as many words as there are bounding boxes")
  266. else:
  267. if len(words) != len(boxes):
  268. raise ValueError("You must provide as many words as there are bounding boxes")
  269. if is_batched:
  270. if text_pair is not None and len(text) != len(text_pair):
  271. raise ValueError(
  272. f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
  273. f" {len(text_pair)}."
  274. )
  275. batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
  276. is_pair = bool(text_pair is not None)
  277. return self.batch_encode_plus(
  278. batch_text_or_text_pairs=batch_text_or_text_pairs,
  279. is_pair=is_pair,
  280. boxes=boxes,
  281. word_labels=word_labels,
  282. add_special_tokens=add_special_tokens,
  283. padding=padding,
  284. truncation=truncation,
  285. max_length=max_length,
  286. stride=stride,
  287. pad_to_multiple_of=pad_to_multiple_of,
  288. padding_side=padding_side,
  289. return_tensors=return_tensors,
  290. return_token_type_ids=return_token_type_ids,
  291. return_attention_mask=return_attention_mask,
  292. return_overflowing_tokens=return_overflowing_tokens,
  293. return_special_tokens_mask=return_special_tokens_mask,
  294. return_offsets_mapping=return_offsets_mapping,
  295. return_length=return_length,
  296. verbose=verbose,
  297. **kwargs,
  298. )
  299. else:
  300. return self.encode_plus(
  301. text=text,
  302. text_pair=text_pair,
  303. boxes=boxes,
  304. word_labels=word_labels,
  305. add_special_tokens=add_special_tokens,
  306. padding=padding,
  307. truncation=truncation,
  308. max_length=max_length,
  309. stride=stride,
  310. pad_to_multiple_of=pad_to_multiple_of,
  311. padding_side=padding_side,
  312. return_tensors=return_tensors,
  313. return_token_type_ids=return_token_type_ids,
  314. return_attention_mask=return_attention_mask,
  315. return_overflowing_tokens=return_overflowing_tokens,
  316. return_special_tokens_mask=return_special_tokens_mask,
  317. return_offsets_mapping=return_offsets_mapping,
  318. return_length=return_length,
  319. verbose=verbose,
  320. **kwargs,
  321. )
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.batch_encode_plus
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: Optional[bool] = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.tokenize
    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
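        """
        Convert a string (and an optional pair) into a list of token strings using the backend Rust tokenizer,
        without converting them to ids. Special tokens are only added when `add_special_tokens=True`.
        """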
        batched_input = [(text, pair)] if pair else [text]
        encodings = self._tokenizer.encode_batch(
            batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
        )

        return encodings[0].tokens

    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.encode_plus
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences.

        .. warning:: This method is deprecated, `__call__` should be used instead.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of
                strings.
            text_pair (`List[str]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            text=text,
            boxes=boxes,
            text_pair=text_pair,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: Optional[bool] = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
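        # Besides the standard fast-tokenizer outputs, this method builds a token-level `bbox` entry by repeating
        # each word's bounding box for all of its sub-tokens (special tokens get `cls_token_box`, `sep_token_box`
        # or `pad_token_box`). When `word_labels` is provided, a `labels` entry is built the same way, with
        # sub-tokens after the first receiving `pad_token_label` if `only_label_first_subword` is set.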
        if not isinstance(batch_text_or_text_pairs, list):
            raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")

        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
        )

        if is_pair:
            batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]

        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=True,  # we set this to True as LayoutLMv3 always expects pretokenized inputs
        )

        # Convert encoding to dict
        # `Tokens` has type: Tuple[
        #     List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
        #     List[EncodingFast]
        # ]
        # with nested dimensions corresponding to batch, overflows, sequence length
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=True
                if word_labels is not None
                else return_offsets_mapping,  # we use offsets to create the labels
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]

        # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
        # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
        # (we say ~ because the number of overflows varies with the example in the batch)
        #
        # To match each overflowing sample with the original sample in the batch
        # we add an overflow_to_sample_mapping array (see below)
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]

        # If returning overflowing tokens, we need to return a mapping
        # from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping

        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)

        # create the token boxes
        token_boxes = []
        for batch_index in range(len(sanitized_tokens["input_ids"])):
            if return_overflowing_tokens:
                original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
            else:
                original_index = batch_index
            token_boxes_example = []
            for id, sequence_id, word_id in zip(
                sanitized_tokens["input_ids"][batch_index],
                sanitized_encodings[batch_index].sequence_ids,
                sanitized_encodings[batch_index].word_ids,
            ):
                if word_id is not None:
                    if is_pair and sequence_id == 0:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        token_boxes_example.append(boxes[original_index][word_id])
                else:
                    if id == self.cls_token_id:
                        token_boxes_example.append(self.cls_token_box)
                    elif id == self.sep_token_id:
                        token_boxes_example.append(self.sep_token_box)
                    elif id == self.pad_token_id:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        raise ValueError("Id not recognized")
            token_boxes.append(token_boxes_example)

        sanitized_tokens["bbox"] = token_boxes

        # optionally, create the labels
        if word_labels is not None:
            labels = []
            for batch_index in range(len(sanitized_tokens["input_ids"])):
                if return_overflowing_tokens:
                    original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
                else:
                    original_index = batch_index
                labels_example = []
                previous_token_empty = False
                for id, offset, word_id in zip(
                    sanitized_tokens["input_ids"][batch_index],
                    sanitized_tokens["offset_mapping"][batch_index],
                    sanitized_encodings[batch_index].word_ids,
                ):
                    if word_id is not None:
                        if self.only_label_first_subword:
                            if offset[0] == 0 and not previous_token_empty:
                                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                                labels_example.append(word_labels[original_index][word_id])
                            else:
                                labels_example.append(self.pad_token_label)
                            if offset == (0, 0):
                                previous_token_empty = True
                            else:
                                previous_token_empty = False
                        else:
                            labels_example.append(word_labels[original_index][word_id])
                    else:
                        labels_example.append(self.pad_token_label)
                labels.append(labels_example)

            sanitized_tokens["labels"] = labels

        # finally, remove offsets if the user didn't want them
        if not return_offsets_mapping:
            del sanitized_tokens["offset_mapping"]

        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)

    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._encode_plus
    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # make it a batched input
        # 2 options:
        # 1) only text, in which case text must be a list of str
        # 2) text + text_pair, in which case text = str and text_pair a list of str
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_boxes = [boxes]
        batched_word_labels = [word_labels] if word_labels is not None else None
        batched_output = self._batch_encode_plus(
            batched_input,
            is_pair=bool(text_pair is not None),
            boxes=batched_boxes,
            word_labels=batched_word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        # If return_tensors is None, we can remove the leading batch axis.
        # Overflowing tokens are returned as a batch of output, so we keep them in this case.
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )

        self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)

        return batched_output

    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._pad
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:
                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side
            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding side: " + str(padding_side))

        return encoded_inputs

    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
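        """
        Build model inputs from a sequence or a pair of sequences by adding the special tokens used by LayoutLMv3
        (same format as RoBERTa):

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`
        """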
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]