- # coding=utf-8
- # Copyright 2018 T5 Authors and HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Tokenization class for model T5."""
- import os
- import re
- import warnings
- from shutil import copyfile
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
- import sentencepiece as spm
- from ...convert_slow_tokenizer import import_protobuf
- from ...tokenization_utils import PreTrainedTokenizer
- from ...tokenization_utils_base import AddedToken
- if TYPE_CHECKING:
- from ...tokenization_utils_base import TextInput
- from ...utils import logging
- logger = logging.get_logger(__name__)
- VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
- # TODO(PVP) - this should be removed in Transformers v5
- SPIECE_UNDERLINE = "▁"
- class T5Tokenizer(PreTrainedTokenizer):
- """
- Construct a T5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
- This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
- this superclass for more information regarding those methods.
- Args:
- vocab_file (`str`):
- [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
- contains the vocabulary necessary to instantiate a tokenizer.
- eos_token (`str`, *optional*, defaults to `"</s>"`):
- The end of sequence token.
- <Tip>
- When building a sequence using special tokens, this is not the token that is used for the end of sequence.
- The token used is the `sep_token`.
- </Tip>
- unk_token (`str`, *optional*, defaults to `"<unk>"`):
- The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
- token instead.
- pad_token (`str`, *optional*, defaults to `"<pad>"`):
- The token used for padding, for example when batching sequences of different lengths.
- extra_ids (`int`, *optional*, defaults to 100):
- Number of extra ids added to the vocabulary for use as sentinels. These tokens are
- accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. They can be
- retrieved by calling the `get_sentinel_tokens` method, and their token ids by calling the
- `get_sentinel_token_ids` method.
- additional_special_tokens (`List[str]`, *optional*):
- Additional special tokens used by the tokenizer.
- sp_model_kwargs (`dict`, *optional*):
- Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
- SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
- to set:
- - `enable_sampling`: Enable subword regularization.
- - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- - `nbest_size = {0,1}`: No sampling is performed.
- - `nbest_size > 1`: samples from the nbest_size results.
- - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
- using the forward-filtering-and-backward-sampling algorithm.
- - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
- BPE-dropout.
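- For example, subword regularization can be enabled when loading the tokenizer (a minimal sketch; the
- sampling values below are illustrative, not recommendations):
- ```python
- >>> from transformers import T5Tokenizer
- >>> tokenizer = T5Tokenizer.from_pretrained(
- ...     "google-t5/t5-base", sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1}
- ... )
- ```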
- legacy (`bool`, *optional*):
- Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy behaviour predates the merge
- of #24622 and #25224, which include fixes to properly handle tokens that appear after special tokens. A
- simple example:
- - `legacy=True`:
- ```python
- >>> from transformers import T5Tokenizer
- >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True)
- >>> tokenizer.encode("Hello <extra_id_0>.")
- [8774, 32099, 3, 5, 1]
- ```
- - `legacy=False`:
- ```python
- >>> from transformers import T5Tokenizer
- >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)
- >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
- [8774, 32099, 5, 1]
- ```
- Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
- add_prefix_space (`bool`, *optional*, defaults to `True`):
- Whether or not to add an initial space to the input. This allows the leading word to be treated just
- like any other word.
- Attributes:
- sp_model (`SentencePieceProcessor`):
- The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
- """
- vocab_files_names = VOCAB_FILES_NAMES
- model_input_names = ["input_ids", "attention_mask"]
- def __init__(
- self,
- vocab_file,
- eos_token="</s>",
- unk_token="<unk>",
- pad_token="<pad>",
- extra_ids=100,
- additional_special_tokens=None,
- sp_model_kwargs: Optional[Dict[str, Any]] = None,
- legacy=None,
- add_prefix_space=True,
- **kwargs,
- ) -> None:
- pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
- unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
- eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
- self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
- self.vocab_file = vocab_file
- self._extra_ids = extra_ids
- self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- self.sp_model.Load(vocab_file)
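- # Reconcile `additional_special_tokens` with `extra_ids`: reuse any "<extra_id_*>" tokens the caller
- # passed, create them when none were given, and raise if the two counts do not match.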
- if additional_special_tokens is not None:
- extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)]
- if len(extra_tokens) < 1:
- additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)]
- elif extra_ids > 0 and extra_ids != len(extra_tokens):
- raise ValueError(
- f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
- " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
- " tokens"
- )
- else:
- extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
- additional_special_tokens = extra_tokens
- # for legacy purpose, we keep this. Will be removed and tests updated. (when `added_tokens_decoder` is not passed as kwargs)
- self._added_tokens_decoder = {}
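- # The sentinel tokens occupy the last `extra_ids` ids of the vocabulary, with <extra_id_0> mapped to the
- # highest id (e.g. 32099 for the standard 32000-piece T5 vocabulary with 100 extra ids).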
- for i in range(len(extra_tokens)):
- self._added_tokens_decoder[len(self.sp_model) - 1 + extra_ids - i] = AddedToken(
- f"<extra_id_{i}>", single_word=False, lstrip=True, rstrip=True, special=True, normalized=False
- )
- if legacy is None:
- logger.warning_once(
- f"You are using the default legacy behaviour of the {self.__class__}. This is"
- " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
- " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
- " means, and thoroughly read the reason why this was added as explained in"
- " https://github.com/huggingface/transformers/pull/24565"
- )
- legacy = True
- self.legacy = legacy
- self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
- self.vocab_file = vocab_file
- self._extra_ids = extra_ids
- self.add_prefix_space = add_prefix_space
- super().__init__(
- eos_token=eos_token,
- unk_token=unk_token,
- pad_token=pad_token,
- extra_ids=extra_ids,
- additional_special_tokens=additional_special_tokens,
- sp_model_kwargs=self.sp_model_kwargs,
- legacy=legacy,
- add_prefix_space=add_prefix_space,
- **kwargs,
- )
- def get_spm_processor(self, from_slow=False):
- tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- if self.legacy or from_slow: # no dependency on protobuf
- tokenizer.Load(self.vocab_file)
- return tokenizer
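- # Non-legacy path: rewrite the serialized sentencepiece model so that `add_dummy_prefix` is disabled;
- # the prefix space is then handled explicitly in `tokenize`/`_tokenize` instead of inside sentencepiece.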
- with open(self.vocab_file, "rb") as f:
- sp_model = f.read()
- model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
- model = model_pb2.ModelProto.FromString(sp_model)
- normalizer_spec = model_pb2.NormalizerSpec()
- normalizer_spec.add_dummy_prefix = False
- model.normalizer_spec.MergeFrom(normalizer_spec)
- sp_model = model.SerializeToString()
- tokenizer.LoadFromSerializedProto(sp_model)
- return tokenizer
- @staticmethod
- def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
- if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
- deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
- if init_max_model_length is not None and init_max_model_length != max_model_length:
- return init_max_model_length
- elif init_max_model_length is None:
- warnings.warn(
- "This tokenizer was incorrectly instantiated with a model max length of"
- f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
- " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
- " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
- f" {pretrained_model_name_or_path} automatically truncating your input to"
- f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
- f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
- " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
- " instantiate this tokenizer with `model_max_length` set to your preferred value.",
- FutureWarning,
- )
- return max_model_length
- @property
- def vocab_size(self):
- return self.sp_model.get_piece_size()
- def get_vocab(self):
- vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
- vocab.update(self.added_tokens_encoder)
- return vocab
- def get_special_tokens_mask(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
- ) -> List[int]:
- """
- Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
- special tokens using the tokenizer `prepare_for_model` method.
- Args:
- token_ids_0 (`List[int]`):
- List of IDs.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- already_has_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not the token list is already formatted with special tokens for the model.
- Returns:
- `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
- """
- if already_has_special_tokens:
- return super().get_special_tokens_mask(
- token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
- )
- # normal case: some special tokens
- if token_ids_1 is None:
- return ([0] * len(token_ids_0)) + [1]
- return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
- def get_sentinel_tokens(self):
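- """Return the sentinel tokens, i.e. the "<extra_id_*>" entries among the additional special tokens."""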
- return list(
- set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens))
- )
- def get_sentinel_token_ids(self):
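- """Return the vocabulary ids of the sentinel tokens returned by `get_sentinel_tokens`."""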
- return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
- def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
- """Do not add eos again if user already added it."""
- if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
- warnings.warn(
- f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
- " eos tokens being added."
- )
- return token_ids
- else:
- return token_ids + [self.eos_token_id]
- def create_token_type_ids_from_sequences(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
- use of token type ids, therefore a list of zeros is returned.
- Args:
- token_ids_0 (`List[int]`):
- List of IDs.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- Returns:
- `List[int]`: List of zeros.
- """
- eos = [self.eos_token_id]
- if token_ids_1 is None:
- return len(token_ids_0 + eos) * [0]
- return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
- def build_inputs_with_special_tokens(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
- adding special tokens. A sequence has the following format:
- - single sequence: `X </s>`
- - pair of sequences: `A </s> B </s>`
- Args:
- token_ids_0 (`List[int]`):
- List of IDs to which the special tokens will be added.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- Returns:
- `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
- """
- token_ids_0 = self._add_eos_if_not_present(token_ids_0)
- if token_ids_1 is None:
- return token_ids_0
- else:
- token_ids_1 = self._add_eos_if_not_present(token_ids_1)
- return token_ids_0 + token_ids_1
- def __getstate__(self):
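- # The loaded SentencePieceProcessor is dropped when pickling; `__setstate__` re-creates it from `vocab_file`.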
- state = self.__dict__.copy()
- state["sp_model"] = None
- return state
- def __setstate__(self, d):
- self.__dict__ = d
- # for backward compatibility
- if not hasattr(self, "sp_model_kwargs"):
- self.sp_model_kwargs = {}
- self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- self.sp_model.Load(self.vocab_file)
- def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
- """
- Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
- first token is special.
- """
- if self.legacy or len(text) == 0:
- return super().tokenize(text, **kwargs)
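- # Non-legacy path: turn any literal SPIECE_UNDERLINE back into a plain space, optionally prepend the prefix
- # space ourselves (since `add_dummy_prefix` is disabled), and drop a lone leading underline token that would
- # otherwise precede a special token.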
- text = text.replace(SPIECE_UNDERLINE, " ")
- if self.add_prefix_space:
- text = SPIECE_UNDERLINE + text
- tokens = super().tokenize(text, **kwargs)
- if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
- tokens = tokens[1:]
- return tokens
- @property
- def unk_token_length(self):
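- """Number of sentencepiece pieces the `unk_token` string encodes to; `_tokenize` strips this many leading pieces."""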
- return len(self.sp_model.encode(str(self.unk_token)))
- def _tokenize(self, text, **kwargs):
- """
- Returns a tokenized string.
- We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
- SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
- `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
- `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
- `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
- """
- if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
- return self.sp_model.encode(text, out_type=str)
- # 1. Encode string + prefix ex: "<unk> Hey"
- tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
- # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
- return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
- def _convert_token_to_id(self, token):
- """Converts a token (str) in an id using the vocab."""
- return self.sp_model.piece_to_id(token)
- def _convert_id_to_token(self, index):
- """Converts an index (integer) in a token (str) using the vocab."""
- token = self.sp_model.IdToPiece(index)
- return token
- def convert_tokens_to_string(self, tokens):
- """Converts a sequence of tokens (string) in a single string."""
- # since we manually add the prefix space, we have to remove it when decoding
- if tokens and tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
- tokens[0] = tokens[0][1:]
- current_sub_tokens = []
- out_string = ""
- prev_is_special = False
- for token in tokens:
- # make sure that special tokens are not decoded using sentencepiece model
- if token in self.all_special_tokens:
- if not prev_is_special:
- out_string += " "
- out_string += self.sp_model.decode(current_sub_tokens) + token
- prev_is_special = True
- current_sub_tokens = []
- else:
- current_sub_tokens.append(token)
- prev_is_special = False
- out_string += self.sp_model.decode(current_sub_tokens)
- return out_string.strip()
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
- if not os.path.isdir(save_directory):
- logger.error(f"Vocabulary path ({save_directory}) should be a directory")
- return
- out_vocab_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
- )
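- # Copy the original spiece.model when it is still available on disk; otherwise dump the in-memory serialized proto.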
- if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
- copyfile(self.vocab_file, out_vocab_file)
- elif not os.path.isfile(self.vocab_file):
- with open(out_vocab_file, "wb") as fi:
- content_spiece_model = self.sp_model.serialized_model_proto()
- fi.write(content_spiece_model)
- return (out_vocab_file,)
|