# coding=utf-8
# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for LayoutLMv2."""

import collections
import os
import sys
import unicodedata
from typing import Dict, List, Optional, Tuple, Union

from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...tokenization_utils_base import (
    BatchEncoding,
    EncodedInput,
    PreTokenizedInput,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING = r"""
            add_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to encode the sequences with the special tokens relative to their model.
            padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
                  to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
            stride (`int`, *optional*, defaults to 0):
                If set to a number along with `max_length`, the overflowing tokens returned when
                `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
                returned to provide some overlap between truncated and overflowing sequences. The value of this
                argument defines the number of overlapping tokens.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
"""

LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
            return_token_type_ids (`bool`, *optional*):
                Whether to return token type IDs. If left to the default, will return the token type IDs according to
                the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are token type IDs?](../glossary#token-type-ids)
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
                of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
                of returning overflowing tokens.
            return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
                Whether or not to return special tokens mask information.
            return_offsets_mapping (`bool`, *optional*, defaults to `False`):
                Whether or not to return `(char_start, char_end)` for each token.

                This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
                Python's tokenizer, this method will raise `NotImplementedError`.
            return_length (`bool`, *optional*, defaults to `False`):
                Whether or not to return the lengths of the encoded inputs.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
            **kwargs: passed to the `self.tokenize()` method

        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model.

              [What are input IDs?](../glossary#input-ids)

            - **bbox** -- List of bounding boxes to be fed to a model.
            - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
              if *"token_type_ids"* is in `self.model_input_names`).

              [What are token type IDs?](../glossary#token-type-ids)

            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).

              [What are attention masks?](../glossary#attention-mask)

            - **labels** -- List of labels to be fed to a model (when `word_labels` is specified).
            - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
              regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
            - **length** -- The length of the inputs (when `return_length=True`).
"""
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


# Maps every Unicode punctuation code point to `None`, usable with `str.translate` to strip punctuation.
table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P"))


def subfinder(mylist, pattern):
    """Returns the first occurrence of `pattern` as a sublist of `mylist` and its start index, or `(None, 0)`."""
    matches = []
    indices = []
    for idx in range(len(mylist)):
        if mylist[idx] == pattern[0] and mylist[idx : idx + len(pattern)] == pattern:
            matches.append(pattern)
            indices.append(idx)
    if matches:
        return matches[0], indices[0]
    else:
        return None, 0
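

# Illustrative example (not part of the original module): `subfinder` is a plain sublist search, e.g.
#
#     subfinder(["the", "name", "is", "john", "doe"], ["john", "doe"])
#     # -> (["john", "doe"], 3)
#     subfinder(["the", "name"], ["jane"])
#     # -> (None, 0)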
class LayoutLMv2Tokenizer(PreTrainedTokenizer):
    r"""
    Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words, word-level
    bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and
    optional `labels` (for token classification).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    [`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
    word-level bounding boxes into token-level bounding boxes.
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[1000, 1000, 1000, 1000],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        model_max_length: int = 512,
        additional_special_tokens: Optional[List[str]] = None,
        **kwargs,
    ):
        sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
        mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = LayoutLMv2Tokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # additional properties
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            model_max_length=model_max_length,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string
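
    # Example (illustrative only): WordPiece continuation pieces are prefixed with "##", so
    # `convert_tokens_to_string(["un", "##want", "##ed", "runn", "##ing"])` returns "unwanted running".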

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence     | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # Lists are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError(
                    "text input must be of type `str` (single example) or `List[str]` (batch of examples)."
                )
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
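
    # Illustrative call patterns (a sketch, not part of the original file), mirroring the input cases this
    # tokenizer supports:
    #
    #     # 1. words + boxes (document image classification, or token classification at inference)
    #     tokenizer(words, boxes=boxes)
    #     # 2. words + boxes + word_labels (token classification at training time)
    #     tokenizer(words, boxes=boxes, word_labels=labels)
    #     # 3. question + words + boxes (document visual question answering)
    #     tokenizer(question, words, boxes=boxes)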

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
            )

        batch_outputs = self._batch_prepare_for_model(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return BatchEncoding(batch_outputs)

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_prepare_for_model(
        self,
        batch_text_or_text_pairs,
        is_pair: bool = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Args:
            batch_text_or_text_pairs: list of words (or list of (question, words) pairs) to encode
        """

        batch_outputs = {}
        for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
            batch_text_or_text_pair, boxes_example = example
            outputs = self.prepare_for_model(
                batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
                batch_text_or_text_pair[1] if is_pair else None,
                boxes_example,
                word_labels=word_labels[idx] if word_labels is not None else None,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                padding_side=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING)
    def encode(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> List[int]:
        encoded_inputs = self.encode_plus(
            text=text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        return encoded_inputs["input_ids"]

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences.

        .. warning:: This method is deprecated, `__call__` should be used instead.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            text=text,
            boxes=boxes,
            text_pair=text_pair,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast. "
                "More information on available tokenizers at "
                "https://github.com/huggingface/transformers/pull/2674"
            )

        return self.prepare_for_model(
            text=text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
        truncates sequences if overflowing while taking into account the special tokens and manages a moving window
        (with user defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
        *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
        combination of arguments will raise an error.

        Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
        token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
        labeled with -100, such that they will be ignored by the loss function.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        tokens = []
        pair_tokens = []
        token_boxes = []
        pair_token_boxes = []
        labels = []

        if text_pair is None:
            if word_labels is None:
                # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
                for word, box in zip(text, boxes):
                    if len(word) < 1:  # skip empty words
                        continue
                    word_tokens = self.tokenize(word)
                    tokens.extend(word_tokens)
                    token_boxes.extend([box] * len(word_tokens))
            else:
                # CASE 2: token classification (training)
                for word, box, label in zip(text, boxes, word_labels):
                    if len(word) < 1:  # skip empty words
                        continue
                    word_tokens = self.tokenize(word)
                    tokens.extend(word_tokens)
                    token_boxes.extend([box] * len(word_tokens))
                    if self.only_label_first_subword:
                        # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                        labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
                    else:
                        labels.extend([label] * len(word_tokens))
        else:
            # CASE 3: document visual question answering (inference)
            # text = question
            # text_pair = words
            tokens = self.tokenize(text)
            token_boxes = [self.pad_token_box for _ in range(len(tokens))]

            for word, box in zip(text_pair, boxes):
                if len(word) < 1:  # skip empty words
                    continue
                word_tokens = self.tokenize(word)
                pair_tokens.extend(word_tokens)
                pair_token_boxes.extend([box] * len(word_tokens))

        # Create ids + pair_ids
        ids = self.convert_tokens_to_ids(tokens)
        pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None

        if (
            return_overflowing_tokens
            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
            and pair_ids is not None
        ):
            raise ValueError(
                "Not possible to return overflowing tokens for pair of sequences with the "
                "`longest_first`. Please select another truncation strategy than `longest_first`, "
                "for instance `only_second` or `only_first`."
            )

        # Compute the total size of the returned encodings
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

        # Truncation: Handle max sequence length
        overflowing_tokens = []
        overflowing_token_boxes = []
        overflowing_labels = []
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
            (
                ids,
                token_boxes,
                pair_ids,
                pair_token_boxes,
                labels,
                overflowing_tokens,
                overflowing_token_boxes,
                overflowing_labels,
            ) = self.truncate_sequences(
                ids,
                token_boxes,
                pair_ids=pair_ids,
                pair_token_boxes=pair_token_boxes,
                labels=labels,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )

        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )

        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        encoded_inputs = {}

        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
            encoded_inputs["overflowing_labels"] = overflowing_labels
            encoded_inputs["num_truncated_tokens"] = total_len - max_length

        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
            if pair_token_boxes:
                pair_token_boxes = pair_token_boxes + [self.sep_token_box]
            if labels:
                labels = [self.pad_token_label] + labels + [self.pad_token_label]
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])

        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        encoded_inputs["bbox"] = token_boxes + pair_token_boxes
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
        if labels:
            encoded_inputs["labels"] = labels

        # Check lengths
        self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)

        # Padding
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

        if return_length:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])

        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )

        return batch_outputs
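
    # Illustrative behavior of prepare_for_model (a sketch, not part of the original file): a word such as
    # "weirdly" with box [3, 7, 80, 27] and label 1 might be wordpiece-tokenized into ["weird", "##ly"]; the
    # resulting encoding then repeats the box for both tokens ([3, 7, 80, 27] twice in `bbox`) and, with
    # `only_label_first_subword=True`, emits labels [1, -100] so the loss ignores the continuation piece.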
  1021. def truncate_sequences(
  1022. self,
  1023. ids: List[int],
  1024. token_boxes: List[List[int]],
  1025. pair_ids: Optional[List[int]] = None,
  1026. pair_token_boxes: Optional[List[List[int]]] = None,
  1027. labels: Optional[List[int]] = None,
  1028. num_tokens_to_remove: int = 0,
  1029. truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
  1030. stride: int = 0,
  1031. ) -> Tuple[List[int], List[int], List[int]]:
  1032. """
  1033. Truncates a sequence pair in-place following the strategy.
  1034. Args:
  1035. ids (`List[int]`):
  1036. Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
  1037. `convert_tokens_to_ids` methods.
  1038. token_boxes (`List[List[int]]`):
  1039. Bounding boxes of the first sequence.
  1040. pair_ids (`List[int]`, *optional*):
  1041. Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
  1042. and `convert_tokens_to_ids` methods.
  1043. pair_token_boxes (`List[List[int]]`, *optional*):
  1044. Bounding boxes of the second sequence.
  1045. labels (`List[int]`, *optional*):
  1046. Labels of the first sequence (for token classification tasks).
  1047. num_tokens_to_remove (`int`, *optional*, defaults to 0):
  1048. Number of tokens to remove using the truncation strategy.
  1049. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
  1050. The strategy to follow for truncation. Can be:
  1051. - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
  1052. maximum acceptable input length for the model if that argument is not provided. This will truncate
  1053. token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
  1054. batch of pairs) is provided.
  1055. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
  1056. maximum acceptable input length for the model if that argument is not provided. This will only
  1057. truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
  1058. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
  1059. maximum acceptable input length for the model if that argument is not provided. This will only
  1060. truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
  1061. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
  1062. than the model maximum admissible input size).
  1063. stride (`int`, *optional*, defaults to 0):
  1064. If set to a positive number, the overflowing tokens returned will contain some tokens from the main
  1065. sequence returned. The value of this argument defines the number of additional tokens.
  1066. Returns:
  1067. `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
  1068. overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
  1069. of sequences (or a batch of pairs) is provided.
  1070. """
        if num_tokens_to_remove <= 0:
            return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []

        if not isinstance(truncation_strategy, TruncationStrategy):
            truncation_strategy = TruncationStrategy(truncation_strategy)

        overflowing_tokens = []
        overflowing_token_boxes = []
        overflowing_labels = []
        if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
            truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
        ):
            if len(ids) > num_tokens_to_remove:
                window_len = min(len(ids), stride + num_tokens_to_remove)
                overflowing_tokens = ids[-window_len:]
                overflowing_token_boxes = token_boxes[-window_len:]
                overflowing_labels = labels[-window_len:]
                ids = ids[:-num_tokens_to_remove]
                token_boxes = token_boxes[:-num_tokens_to_remove]
                labels = labels[:-num_tokens_to_remove]
            else:
                error_msg = (
                    f"We need to remove {num_tokens_to_remove} tokens to truncate the input, "
                    f"but the first sequence has a length {len(ids)}. "
                )
                if truncation_strategy == TruncationStrategy.ONLY_FIRST:
                    error_msg = (
                        error_msg + "Please select another truncation strategy than "
                        f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
                    )
                logger.error(error_msg)
        elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
            logger.warning(
                "Be aware, overflowing tokens are not returned for the setting you have chosen,"
                f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
                "truncation strategy. So the returned list will always be empty even if some "
                "tokens have been removed."
            )
            for _ in range(num_tokens_to_remove):
                if pair_ids is None or len(ids) > len(pair_ids):
                    ids = ids[:-1]
                    token_boxes = token_boxes[:-1]
                    labels = labels[:-1]
                else:
                    pair_ids = pair_ids[:-1]
                    pair_token_boxes = pair_token_boxes[:-1]
        elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
            if len(pair_ids) > num_tokens_to_remove:
                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
                overflowing_tokens = pair_ids[-window_len:]
                overflowing_token_boxes = pair_token_boxes[-window_len:]
                pair_ids = pair_ids[:-num_tokens_to_remove]
                pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
            else:
                logger.error(
                    f"We need to remove {num_tokens_to_remove} tokens to truncate the input, "
                    f"but the second sequence has a length {len(pair_ids)}. "
                    f"Please select another truncation strategy than {truncation_strategy}, "
                    "for instance 'longest_first' or 'only_first'."
                )

        return (
            ids,
            token_boxes,
            pair_ids,
            pair_token_boxes,
            labels,
            overflowing_tokens,
            overflowing_token_boxes,
            overflowing_labels,
        )
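
    # Illustrative sketch (not part of the class API): with the `"only_second"` strategy and `stride=0`,
    # `truncate_sequences` drops tokens from the end of the pair sequence and reports them as overflow.
    # The `tokenizer` instance below is hypothetical and only meant to make the arithmetic concrete:
    #
    #     outputs = tokenizer.truncate_sequences(
    #         ids=[1, 2, 3, 4],
    #         token_boxes=[[0, 0, 1, 1]] * 4,
    #         pair_ids=[5, 6, 7, 8, 9, 10],
    #         pair_token_boxes=[[2, 2, 3, 3]] * 6,
    #         labels=[0, 0, 0, 0],
    #         num_tokens_to_remove=2,
    #         truncation_strategy="only_second",
    #     )
    #     ids, token_boxes, pair_ids, pair_token_boxes, labels, overflow, overflow_boxes, overflow_labels = outputs
    #     # pair_ids == [5, 6, 7, 8] and overflow == [9, 10] (with the matching two boxes in overflow_boxes),
    #     # while ids, token_boxes and labels come back unchanged.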

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
  1148. """
  1149. Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
  1150. Args:
  1151. encoded_inputs:
  1152. Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
  1153. max_length: maximum length of the returned list and optionally padding length (see below).
  1154. Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length
                - PaddingStrategy.DO_NOT_PAD: Do not pad (default)
                The tokenizer padding sides are defined in self.padding_side:
                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side
            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError(f"Invalid padding side: {padding_side}. Should be 'left' or 'right'.")

        return encoded_inputs
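

# Illustrative sketch (not part of the library API): the helper below mirrors the right-padding branch of
# `LayoutLMv2Tokenizer._pad` on plain Python lists, including the `pad_to_multiple_of` rounding used for
# Tensor Core efficiency. The function name and the hard-coded pad values are hypothetical and exist only
# for demonstration purposes.
def _sketch_pad_right(
    input_ids, bbox, max_length, pad_to_multiple_of=None, pad_token_id=0, pad_token_box=(0, 0, 0, 0)
):
    # Round max_length up to the next multiple of `pad_to_multiple_of`, as `_pad` does above.
    if pad_to_multiple_of is not None and max_length % pad_to_multiple_of != 0:
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    attention_mask = [1] * len(input_ids)
    difference = max_length - len(input_ids)
    if difference > 0:
        # Real tokens keep mask 1; padding positions get mask 0 and the padding box.
        attention_mask = attention_mask + [0] * difference
        bbox = bbox + [list(pad_token_box)] * difference
        input_ids = input_ids + [pad_token_id] * difference
    return {"input_ids": input_ids, "bbox": bbox, "attention_mask": attention_mask}


# For example, 5 tokens padded to a multiple of 8 come back with length 8 and
# an attention mask of [1, 1, 1, 1, 1, 0, 0, 0]:
#     _sketch_pad_right([101, 7, 8, 9, 102], [[0, 0, 0, 0]] * 5, max_length=5, pad_to_multiple_of=8)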


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer:
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `do_lower_case` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*):
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
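

# Illustrative sketch (not part of the library API): a minimal usage example of the `BasicTokenizer` defined
# above, with default arguments. The function name is hypothetical and exists only so the example can live in
# this file without running at import time.
def _sketch_basic_tokenizer_example():
    basic_tokenizer = BasicTokenizer(do_lower_case=True)
    # Punctuation becomes standalone tokens and the text is lower-cased.
    tokens = basic_tokenizer.tokenize("Hello, WORLD!")
    assert tokens == ["hello", ",", "world", "!"]
    return tokens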


# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer:
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
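

# Illustrative sketch (not part of the library API): the docstring example above, spelled out with a toy
# vocabulary. The vocabulary, the `[UNK]` token choice and the function name are hypothetical and only
# demonstrate the greedy longest-match-first behaviour of `WordpieceTokenizer`.
def _sketch_wordpiece_example():
    toy_vocab = {"un", "##aff", "##able", "[UNK]"}
    wordpiece_tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
    # "unaffable" is fully covered by the toy vocabulary, so it splits into sub-words ...
    assert wordpiece_tokenizer.tokenize("unaffable") == ["un", "##aff", "##able"]
    # ... while a word with no matching pieces falls back to the unknown token.
    assert wordpiece_tokenizer.tokenize("xyz") == ["[UNK]"]
    return wordpiece_tokenizer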