# coding=utf-8
# Copyright 2021 Google Research, Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for FNet model."""

import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

SPIECE_UNDERLINE = "▁"


class FNetTokenizer(PreTrainedTokenizer):
    """
    Construct an FNet tokenizer. Adapted from [`AlbertTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`],
    which contains most of the main methods. Users should refer to this superclass for more information regarding
    those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `True`):
            Whether or not to keep accents when tokenizing.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
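
    Example (a minimal usage sketch; `google/fnet-base` is the public FNet checkpoint on the Hub, and the
    sampling values below are illustrative choices, not defaults of this class):

    ```python
    >>> from transformers import FNetTokenizer

    >>> tokenizer = FNetTokenizer.from_pretrained("google/fnet-base")
    >>> tokenizer.tokenize("Hello world!")  # deterministic unigram segmentation

    >>> # With subword regularization enabled, the segmentation is sampled and can differ between calls:
    >>> sampling_tokenizer = FNetTokenizer.from_pretrained(
    ...     "google/fnet-base", sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1}
    ... )
    >>> sampling_tokenizer.tokenize("Hello world!")
    ```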

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "token_type_ids"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
        cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor cannot be pickled, so drop it from the state;
        # it is reloaded from `self.vocab_file` when unpickling.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        # Normalize LaTeX-style double quotes to plain double quotes.
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            # Strip accents by decomposing characters and dropping the combining marks.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces that end in a digit followed by a comma (e.g. "9,") so that
            # the digits and the trailing comma become separate pieces, mirroring the
            # heuristic used in `AlbertTokenizer`.
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        spaces_between_special_tokens: bool = False,
        **kwargs,
    ) -> str:
        text = super()._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            spaces_between_special_tokens=spaces_between_special_tokens,
            **kwargs,
        )

        # Mimic the behavior of the Rust tokenizer: no space after <unk>.
        if not spaces_between_special_tokens:
            text = text.replace("<unk> ", "<unk>")
        return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An FNet sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
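
        Example (an illustrative sketch; the token IDs `5, 6, 7, 8` are made-up placeholders and
        `google/fnet-base` is the public FNet checkpoint):

        ```python
        >>> tokenizer = FNetTokenizer.from_pretrained("google/fnet-base")
        >>> tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  # [cls_id, 5, 6, sep_id, 7, 8, sep_id]
        ```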
- """
- sep = [self.sep_token_id]
- cls = [self.cls_token_id]
- if token_ids_1 is None:
- return cls + token_ids_0 + sep
- return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
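
        Example (an illustrative sketch; the token IDs are made-up placeholders, but the output follows directly
        from the logic below):

        ```python
        >>> tokenizer = FNetTokenizer.from_pretrained("google/fnet-base")
        >>> tokenizer.get_special_tokens_mask([5, 6], [7, 8])
        [1, 0, 0, 1, 0, 0, 1]
        ```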
- """
- if already_has_special_tokens:
- return super().get_special_tokens_mask(
- token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
- )
- if token_ids_1 is not None:
- return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
- return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
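
        Example (an illustrative sketch with made-up token IDs; the 0/1 split mirrors the diagram above):

        ```python
        >>> tokenizer = FNetTokenizer.from_pretrained("google/fnet-base")
        >>> tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
        [0, 0, 0, 0, 1, 1, 1]
        ```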
- """
- sep = [self.sep_token_id]
- cls = [self.cls_token_id]
- if token_ids_1 is None:
- return len(cls + token_ids_0 + sep) * [0]
- return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is not available on disk (e.g. the tokenizer was restored from a
            # serialized state), so write out the serialized SentencePiece model instead of copying it.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)