- # coding=utf-8
- # Copyright 2021 The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
- and _encode_plus, in which the Rust tokenizer is used.
- """
- import json
- from typing import Dict, List, Optional, Tuple, Union
- from tokenizers import normalizers
- from ...tokenization_utils_base import (
- BatchEncoding,
- EncodedInput,
- PaddingStrategy,
- PreTokenizedInput,
- TensorType,
- TextInput,
- TextInputPair,
- TruncationStrategy,
- )
- from ...tokenization_utils_fast import PreTrainedTokenizerFast
- from ...utils import add_end_docstrings, logging
- from .tokenization_layoutlmv2 import (
- LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING,
- LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
- LayoutLMv2Tokenizer,
- )
- logger = logging.get_logger(__name__)
- VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
- class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
- r"""
- Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
- This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
- refer to this superclass for more information regarding those methods.
- Args:
- vocab_file (`str`):
- File containing the vocabulary.
- do_lower_case (`bool`, *optional*, defaults to `True`):
- Whether or not to lowercase the input when tokenizing.
- unk_token (`str`, *optional*, defaults to `"[UNK]"`):
- The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
- token instead.
- sep_token (`str`, *optional*, defaults to `"[SEP]"`):
- The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
- sequence classification or for a text and a question for question answering. It is also used as the last
- token of a sequence built with special tokens.
- pad_token (`str`, *optional*, defaults to `"[PAD]"`):
- The token used for padding, for example when batching sequences of different lengths.
- cls_token (`str`, *optional*, defaults to `"[CLS]"`):
- The classifier token which is used when doing sequence classification (classification of the whole sequence
- instead of per-token classification). It is the first token of the sequence when built with special tokens.
- mask_token (`str`, *optional*, defaults to `"[MASK]"`):
- The token used for masking values. This is the token used when training this model with masked language
- modeling. This is the token which the model will try to predict.
- cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
- The bounding box to use for the special [CLS] token.
- sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
- The bounding box to use for the special [SEP] token.
- pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
- The bounding box to use for the special [PAD] token.
- pad_token_label (`int`, *optional*, defaults to -100):
- The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
- CrossEntropyLoss.
- only_label_first_subword (`bool`, *optional*, defaults to `True`):
- Whether or not to only label the first subword, in case word labels are provided.
- tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
- Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
- issue](https://github.com/huggingface/transformers/issues/328)).
- strip_accents (`bool`, *optional*):
- Whether or not to strip all accents. If this option is not specified, then it will be determined by the
- value for `lowercase` (as in the original LayoutLMv2).
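- Example (a minimal, illustrative usage sketch; the checkpoint name, words and box coordinates are
- placeholder values, not values prescribed by this class):
- >>> from transformers import LayoutLMv2TokenizerFast
- >>> tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
- >>> words = ["hello", "world"]
- >>> boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # one [x0, y0, x1, y1] box per word, 0-1000 scale
- >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")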
- """
- vocab_files_names = VOCAB_FILES_NAMES
- slow_tokenizer_class = LayoutLMv2Tokenizer
- def __init__(
- self,
- vocab_file=None,
- tokenizer_file=None,
- do_lower_case=True,
- unk_token="[UNK]",
- sep_token="[SEP]",
- pad_token="[PAD]",
- cls_token="[CLS]",
- mask_token="[MASK]",
- cls_token_box=[0, 0, 0, 0],
- sep_token_box=[1000, 1000, 1000, 1000],
- pad_token_box=[0, 0, 0, 0],
- pad_token_label=-100,
- only_label_first_subword=True,
- tokenize_chinese_chars=True,
- strip_accents=None,
- **kwargs,
- ):
- super().__init__(
- vocab_file,
- tokenizer_file=tokenizer_file,
- do_lower_case=do_lower_case,
- unk_token=unk_token,
- sep_token=sep_token,
- pad_token=pad_token,
- cls_token=cls_token,
- mask_token=mask_token,
- cls_token_box=cls_token_box,
- sep_token_box=sep_token_box,
- pad_token_box=pad_token_box,
- pad_token_label=pad_token_label,
- only_label_first_subword=only_label_first_subword,
- tokenize_chinese_chars=tokenize_chinese_chars,
- strip_accents=strip_accents,
- **kwargs,
- )
- # keep the backend normalizer in sync with the `do_lower_case` / `strip_accents` arguments
- normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
- if (
- normalizer_state.get("lowercase", do_lower_case) != do_lower_case
- or normalizer_state.get("strip_accents", strip_accents) != strip_accents
- ):
- normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
- normalizer_state["lowercase"] = do_lower_case
- normalizer_state["strip_accents"] = strip_accents
- self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
- self.do_lower_case = do_lower_case
- # additional properties
- self.cls_token_box = cls_token_box
- self.sep_token_box = sep_token_box
- self.pad_token_box = pad_token_box
- self.pad_token_label = pad_token_label
- self.only_label_first_subword = only_label_first_subword
- @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def __call__(
- self,
- text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
- text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
- boxes: Union[List[List[int]], List[List[List[int]]]] = None,
- word_labels: Optional[Union[List[int], List[List[int]]]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
- Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
- sequences with word-level normalized bounding boxes and optional labels.
- Args:
- text (`str`, `List[str]`, `List[List[str]]`):
- The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
- (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
- words).
- text_pair (`List[str]`, `List[List[str]]`):
- The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
- (pretokenized string).
- boxes (`List[List[int]]`, `List[List[List[int]]]`):
- Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
- word_labels (`List[int]`, `List[List[int]]`, *optional*):
- Word-level integer labels (for token classification tasks such as FUNSD, CORD).
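- Example (an illustrative batched call for token classification; the checkpoint name, words, boxes and
- labels below are placeholder values, typically produced by an OCR step and a label mapping):
- >>> from transformers import LayoutLMv2TokenizerFast
- >>> tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
- >>> words = [["hello", "world"], ["my", "name", "is", "niels"]]
- >>> boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8], [1, 2, 3, 4], [5, 6, 7, 8]]]
- >>> word_labels = [[0, 1], [2, 3, 4, 5]]
- >>> encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt")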
- """
- # Input type checking for clearer error
- def _is_valid_text_input(t):
- if isinstance(t, str):
- # Strings are fine
- return True
- elif isinstance(t, (list, tuple)):
- # List are fine as long as they are...
- if len(t) == 0:
- # ... empty
- return True
- elif isinstance(t[0], str):
- # ... list of strings
- return True
- elif isinstance(t[0], (list, tuple)):
- # ... list with an empty list or with a list of strings
- return len(t[0]) == 0 or isinstance(t[0][0], str)
- else:
- return False
- else:
- return False
- if text_pair is not None:
- # in case text + text_pair are provided, text = questions, text_pair = words
- if not _is_valid_text_input(text):
- raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
- if not isinstance(text_pair, (list, tuple)):
- raise ValueError(
- "Words must be of type `List[str]` (single pretokenized example), "
- "or `List[List[str]]` (batch of pretokenized examples)."
- )
- else:
- # in case only text is provided => must be words
- if not isinstance(text, (list, tuple)):
- raise ValueError(
- "Words must be of type `List[str]` (single pretokenized example), "
- "or `List[List[str]]` (batch of pretokenized examples)."
- )
- if text_pair is not None:
- is_batched = isinstance(text, (list, tuple))
- else:
- is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
- words = text if text_pair is None else text_pair
- if boxes is None:
- raise ValueError("You must provide corresponding bounding boxes")
- if is_batched:
- if len(words) != len(boxes):
- raise ValueError("You must provide words and boxes for an equal amount of examples")
- for words_example, boxes_example in zip(words, boxes):
- if len(words_example) != len(boxes_example):
- raise ValueError("You must provide as many words as there are bounding boxes")
- else:
- if len(words) != len(boxes):
- raise ValueError("You must provide as many words as there are bounding boxes")
- if is_batched:
- if text_pair is not None and len(text) != len(text_pair):
- raise ValueError(
- f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
- f" {len(text_pair)}."
- )
- batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
- is_pair = bool(text_pair is not None)
- return self.batch_encode_plus(
- batch_text_or_text_pairs=batch_text_or_text_pairs,
- is_pair=is_pair,
- boxes=boxes,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
- else:
- return self.encode_plus(
- text=text,
- text_pair=text_pair,
- boxes=boxes,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
- @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def batch_encode_plus(
- self,
- batch_text_or_text_pairs: Union[
- List[TextInput],
- List[TextInputPair],
- List[PreTokenizedInput],
- ],
- is_pair: Optional[bool] = None,
- boxes: Optional[List[List[List[int]]]] = None,
- word_labels: Optional[Union[List[int], List[List[int]]]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
- return self._batch_encode_plus(
- batch_text_or_text_pairs=batch_text_or_text_pairs,
- is_pair=is_pair,
- boxes=boxes,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
- def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
- batched_input = [(text, pair)] if pair else [text]
- encodings = self._tokenizer.encode_batch(
- batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
- )
- return encodings[0].tokens
- @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def encode_plus(
- self,
- text: Union[TextInput, PreTokenizedInput],
- text_pair: Optional[PreTokenizedInput] = None,
- boxes: Optional[List[List[int]]] = None,
- word_labels: Optional[List[int]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
- Tokenize and prepare for the model a sequence or a pair of sequences.
- .. warning::
- This method is deprecated, `__call__` should be used instead.
- Args:
- text (`str`, `List[str]`):
- The first sequence to be encoded. This can be a string (the question) or a list of strings (the words
- of a single pretokenized example).
- text_pair (`List[str]`, *optional*):
- Optional second sequence to be encoded. This should be a list of strings (the words of a single
- pretokenized example).
- """
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
- return self._encode_plus(
- text=text,
- boxes=boxes,
- text_pair=text_pair,
- word_labels=word_labels,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
- def _batch_encode_plus(
- self,
- batch_text_or_text_pairs: Union[
- List[TextInput],
- List[TextInputPair],
- List[PreTokenizedInput],
- ],
- is_pair: Optional[bool] = None,
- boxes: Optional[List[List[List[int]]]] = None,
- word_labels: Optional[List[List[int]]] = None,
- add_special_tokens: bool = True,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[str] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- ) -> BatchEncoding:
- if not isinstance(batch_text_or_text_pairs, list):
- raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
- # Set the truncation and padding strategy and restore the initial configuration
- self.set_truncation_and_padding(
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- )
- if is_pair:
- batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
- encodings = self._tokenizer.encode_batch(
- batch_text_or_text_pairs,
- add_special_tokens=add_special_tokens,
- is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
- )
- # Convert encoding to dict
- # `Tokens` has type: Tuple[
- # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
- # List[EncodingFast]
- # ]
- # with nested dimensions corresponding to batch, overflows, sequence length
- tokens_and_encodings = [
- self._convert_encoding(
- encoding=encoding,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=True
- if word_labels is not None
- else return_offsets_mapping, # we use offsets to create the labels
- return_length=return_length,
- verbose=verbose,
- )
- for encoding in encodings
- ]
- # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
- # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
- # (we say ~ because the number of overflows varies with each example in the batch)
- #
- # To match each overflowing sample with the original sample in the batch
- # we add an overflow_to_sample_mapping array (see below)
- sanitized_tokens = {}
- for key in tokens_and_encodings[0][0].keys():
- stack = [e for item, _ in tokens_and_encodings for e in item[key]]
- sanitized_tokens[key] = stack
- sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
- # If returning overflowing tokens, we need to return a mapping
- # from the batch idx to the original sample
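- # For example (illustrative), if example 0 overflows into 2 chunks and example 1 into 1 chunk,
- # overflow_to_sample_mapping would be [0, 0, 1].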
- if return_overflowing_tokens:
- overflow_to_sample_mapping = []
- for i, (toks, _) in enumerate(tokens_and_encodings):
- overflow_to_sample_mapping += [i] * len(toks["input_ids"])
- sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
- for input_ids in sanitized_tokens["input_ids"]:
- self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
- # create the token boxes
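- # Each token inherits the bounding box of the word it comes from; e.g. (illustrative) the word "hello"
- # with box [1, 2, 3, 4] split into ["hel", "##lo"] yields [[1, 2, 3, 4], [1, 2, 3, 4]], while
- # [CLS]/[SEP]/[PAD] tokens get `cls_token_box`/`sep_token_box`/`pad_token_box` respectively.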
- token_boxes = []
- for batch_index in range(len(sanitized_tokens["input_ids"])):
- if return_overflowing_tokens:
- original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
- else:
- original_index = batch_index
- token_boxes_example = []
- for id, sequence_id, word_id in zip(
- sanitized_tokens["input_ids"][batch_index],
- sanitized_encodings[batch_index].sequence_ids,
- sanitized_encodings[batch_index].word_ids,
- ):
- if word_id is not None:
- if is_pair and sequence_id == 0:
- token_boxes_example.append(self.pad_token_box)
- else:
- token_boxes_example.append(boxes[original_index][word_id])
- else:
- if id == self.cls_token_id:
- token_boxes_example.append(self.cls_token_box)
- elif id == self.sep_token_id:
- token_boxes_example.append(self.sep_token_box)
- elif id == self.pad_token_id:
- token_boxes_example.append(self.pad_token_box)
- else:
- raise ValueError("Id not recognized")
- token_boxes.append(token_boxes_example)
- sanitized_tokens["bbox"] = token_boxes
- # optionally, create the labels
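- # With `only_label_first_subword=True` (illustrative example), the word "hello" with label 3 tokenized as
- # ["hel", "##lo"] yields [3, self.pad_token_label]; special tokens always receive `pad_token_label`
- # (-100 by default) so that they are ignored by PyTorch's CrossEntropyLoss.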
- if word_labels is not None:
- labels = []
- for batch_index in range(len(sanitized_tokens["input_ids"])):
- if return_overflowing_tokens:
- original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
- else:
- original_index = batch_index
- labels_example = []
- for id, offset, word_id in zip(
- sanitized_tokens["input_ids"][batch_index],
- sanitized_tokens["offset_mapping"][batch_index],
- sanitized_encodings[batch_index].word_ids,
- ):
- if word_id is not None:
- if self.only_label_first_subword:
- if offset[0] == 0:
- # Use the real label id for the first token of the word, and padding ids for the remaining tokens
- labels_example.append(word_labels[original_index][word_id])
- else:
- labels_example.append(self.pad_token_label)
- else:
- labels_example.append(word_labels[original_index][word_id])
- else:
- labels_example.append(self.pad_token_label)
- labels.append(labels_example)
- sanitized_tokens["labels"] = labels
- # finally, remove offsets if the user didn't want them
- if not return_offsets_mapping:
- del sanitized_tokens["offset_mapping"]
- return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
- def _encode_plus(
- self,
- text: Union[TextInput, PreTokenizedInput],
- text_pair: Optional[PreTokenizedInput] = None,
- boxes: Optional[List[List[int]]] = None,
- word_labels: Optional[List[int]] = None,
- add_special_tokens: bool = True,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[str] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- # make it a batched input
- # 2 options:
- # 1) only text, in which case text must be a list of str (the words)
- # 2) text + text_pair, in which case text is a str (the question) and text_pair a list of str (the words)
- batched_input = [(text, text_pair)] if text_pair else [text]
- batched_boxes = [boxes]
- batched_word_labels = [word_labels] if word_labels is not None else None
- batched_output = self._batch_encode_plus(
- batched_input,
- is_pair=bool(text_pair is not None),
- boxes=batched_boxes,
- word_labels=batched_word_labels,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- **kwargs,
- )
- # If return_tensors is None, we can remove the leading batch axis
- # Overflowing tokens are returned as a batch of output so we keep them in this case
- if return_tensors is None and not return_overflowing_tokens:
- batched_output = BatchEncoding(
- {
- key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
- for key, value in batched_output.items()
- },
- batched_output.encodings,
- )
- self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
- return batched_output
- def _pad(
- self,
- encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
- max_length: Optional[int] = None,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_attention_mask: Optional[bool] = None,
- ) -> dict:
- """
- Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
- Args:
- encoded_inputs:
- Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
- max_length: maximum length of the returned list and optionally padding length (see below).
- Will truncate by taking into account the special tokens.
- padding_strategy: PaddingStrategy to use for padding.
- - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- - PaddingStrategy.DO_NOT_PAD: Do not pad
- The tokenizer padding sides are defined in self.padding_side:
- - 'left': pads on the left of the sequences
- - 'right': pads on the right of the sequences
- pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
- This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
- `>= 7.0` (Volta).
- padding_side:
- The side on which the model should have padding applied. Should be either `'right'` or `'left'`.
- Default value is picked from the class attribute of the same name.
- return_attention_mask:
- (optional) Set to False to avoid returning attention mask (default: set to model specifics)
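- Example (a hedged sketch: padding is normally reached through the public `pad` method, which dispatches
- to this override; `tokenizer` is assumed to be an instantiated `LayoutLMv2TokenizerFast`, and the words
- and boxes are placeholder values):
- >>> encoding = tokenizer(["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]])
- >>> padded = tokenizer.pad(encoding, padding="max_length", max_length=8)  # "bbox" is padded alongside "input_ids"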
- """
- # Load from model defaults
- if return_attention_mask is None:
- return_attention_mask = "attention_mask" in self.model_input_names
- required_input = encoded_inputs[self.model_input_names[0]]
- if padding_strategy == PaddingStrategy.LONGEST:
- max_length = len(required_input)
- if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
- max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
- needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
- # Initialize attention mask if not present.
- if return_attention_mask and "attention_mask" not in encoded_inputs:
- encoded_inputs["attention_mask"] = [1] * len(required_input)
- if needs_to_be_padded:
- difference = max_length - len(required_input)
- padding_side = padding_side if padding_side is not None else self.padding_side
- if padding_side == "right":
- if return_attention_mask:
- encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
- if "token_type_ids" in encoded_inputs:
- encoded_inputs["token_type_ids"] = (
- encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
- )
- if "bbox" in encoded_inputs:
- encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
- if "labels" in encoded_inputs:
- encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
- if "special_tokens_mask" in encoded_inputs:
- encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
- encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
- elif padding_side == "left":
- if return_attention_mask:
- encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
- if "token_type_ids" in encoded_inputs:
- encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
- "token_type_ids"
- ]
- if "bbox" in encoded_inputs:
- encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
- if "labels" in encoded_inputs:
- encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
- if "special_tokens_mask" in encoded_inputs:
- encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
- encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
- else:
- raise ValueError("Invalid padding strategy:" + str(padding_side))
- return encoded_inputs
- def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
- """
- Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
- adding special tokens. A BERT sequence has the following format:
- - single sequence: `[CLS] X [SEP]`
- - pair of sequences: `[CLS] A [SEP] B [SEP]`
- Args:
- token_ids_0 (`List[int]`):
- List of IDs to which the special tokens will be added.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- Returns:
- `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
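- Example (illustrative; `tokenizer` is assumed to be an instantiated `LayoutLMv2TokenizerFast` and the ids
- are arbitrary):
- >>> ids = tokenizer.build_inputs_with_special_tokens([10, 20, 30])
- >>> # ids == [tokenizer.cls_token_id, 10, 20, 30, tokenizer.sep_token_id]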
- """
- output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
- if token_ids_1:
- output += token_ids_1 + [self.sep_token_id]
- return output
- def create_token_type_ids_from_sequences(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
- pair mask has the following format:
- 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
- | first sequence    | second sequence |
- If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
- Args:
- token_ids_0 (`List[int]`):
- List of IDs.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- Returns:
- `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
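- Example (illustrative; `tokenizer` is assumed to be an instantiated `LayoutLMv2TokenizerFast`, and the
- result depends only on the lengths of the id lists):
- >>> tokenizer.create_token_type_ids_from_sequences([10, 20], [30])
- [0, 0, 0, 0, 1, 1]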
- """
- sep = [self.sep_token_id]
- cls = [self.cls_token_id]
- if token_ids_1 is None:
- return len(cls + token_ids_0 + sep) * [0]
- return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
- files = self._tokenizer.model.save(save_directory, name=filename_prefix)
- return tuple(files)