- # coding=utf-8
- # Copyright 2020 Google and The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import os
- from shutil import copyfile
- from typing import Any, Dict, List, Optional, Tuple
- import sentencepiece as spm
- from ...tokenization_utils import AddedToken, PreTrainedTokenizer
- from ...utils import logging
- SPIECE_UNDERLINE = "▁"
- VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
- logger = logging.get_logger(__name__)
- # TODO ArthurZ refactor this to only use the added_tokens_encoder
- class PegasusTokenizer(PreTrainedTokenizer):
- r"""
- Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
- This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
- this superclass for more information regarding those methods.
- Args:
- vocab_file (`str`):
- [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
- contains the vocabulary necessary to instantiate a tokenizer.
- pad_token (`str`, *optional*, defaults to `"<pad>"`):
- The token used for padding, for example when batching sequences of different lengths.
- eos_token (`str`, *optional*, defaults to `"</s>"`):
- The end of sequence token.
- <Tip>
- When building a sequence with special tokens, this is the token appended at the end of the sequence (see
- [`~PegasusTokenizer.build_inputs_with_special_tokens`]); PEGASUS does not define a separate `sep_token`.
- </Tip>
- unk_token (`str`, *optional*, defaults to `"<unk>"`):
- The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
- token instead.
- mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
- The token used for masking single token values. This is the token used when training this model with masked
- language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
- It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
- Summarization](https://arxiv.org/pdf/1912.08777.pdf).
- mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
- The token used for masking whole target sentences. This is the token used when training this model with gap
- sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
- pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
- Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
- additional_special_tokens (`List[str]`, *optional*):
- Additional special tokens used by the tokenizer. If no additional_special_tokens are provided, <mask_1> and
- <unk_2>, ..., <unk_102> are used as additional special tokens, corresponding to the [original PEGASUS
- tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
- that uses the tokens 2 - 104 only for pretraining.
- sp_model_kwargs (`dict`, *optional*):
- Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
- SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
- to set:
- - `enable_sampling`: Enable subword regularization.
- - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- - `nbest_size = {0,1}`: No sampling is performed.
- - `nbest_size > 1`: samples from the nbest_size results.
- - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
- using forward-filtering-and-backward-sampling algorithm.
- - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
- BPE-dropout.
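- A minimal usage sketch (assumes the publicly available `google/pegasus-xsum` checkpoint can be downloaded from the Hub):
- ```python
- >>> from transformers import PegasusTokenizer
- >>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
- >>> ids = tokenizer("PEGASUS pretrains with gap-sentence generation.").input_ids
- >>> tokenizer.decode(ids)
- ```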
- """
- vocab_files_names = VOCAB_FILES_NAMES
- model_input_names = ["input_ids", "attention_mask"]
- def __init__(
- self,
- vocab_file,
- pad_token="<pad>",
- eos_token="</s>",
- unk_token="<unk>",
- mask_token="<mask_2>",
- mask_token_sent="<mask_1>",
- additional_special_tokens=None,
- offset=103, # entries 2 - 104 are only used for pretraining
- sp_model_kwargs: Optional[Dict[str, Any]] = None,
- **kwargs,
- ) -> None:
- self.offset = offset
- if additional_special_tokens is not None:
- if not isinstance(additional_special_tokens, list):
- raise TypeError(
- f"additional_special_tokens should be of type {type(list)}, but is"
- f" {type(additional_special_tokens)}"
- )
- additional_special_tokens_extended = (
- ([mask_token_sent] + additional_special_tokens)
- if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
- else additional_special_tokens
- )
- # fill the remaining slots with <unk_x> placeholder tokens in case not all additional tokens are already provided
- additional_special_tokens_extended += [
- f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
- ]
- if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
- raise ValueError(
- "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
- f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
- )
- additional_special_tokens = additional_special_tokens_extended
- else:
- additional_special_tokens_extended = []
- additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
- additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
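- # with the defaults this yields ["<mask_1>", "<unk_2>", ..., "<unk_102>"], matching the placeholder tokens reserved for pretraining by the original PEGASUS tokenizer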
- self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
- self.mask_token_sent = mask_token_sent
- self.vocab_file = vocab_file
- self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- self.sp_model.Load(vocab_file)
- _added_tokens_decoder = {
- 0: AddedToken(str(pad_token), special=True),
- 1: AddedToken(str(eos_token), special=True),
- }
- if self.mask_token_sent is not None:
- _added_tokens_decoder[2] = AddedToken(mask_token_sent, special=True)
- _added_tokens_decoder[3] = AddedToken(str(mask_token), special=True)
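- # the remaining reserved ids are filled with <unk_x> placeholders, continuing right after the tokens registered above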
- for i in range(2, self.offset):
- _added_tokens_decoder[len(_added_tokens_decoder)] = AddedToken(f"<unk_{i}>", special=True)
- # Force update so that the reserved ids defined above always take precedence over anything passed via `added_tokens_decoder` (same behaviour as the fast tokenizer)
- self._added_tokens_decoder = kwargs.pop("added_tokens_decoder", {})
- self._added_tokens_decoder.update(_added_tokens_decoder)
- super().__init__(
- eos_token=eos_token,
- unk_token=unk_token,
- mask_token=mask_token,
- pad_token=pad_token,
- mask_token_sent=mask_token_sent,
- offset=offset,
- additional_special_tokens=additional_special_tokens,
- sp_model_kwargs=self.sp_model_kwargs,
- **kwargs,
- )
- @property
- def vocab_size(self) -> int:
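- # sentencepiece pieces are shifted up by `offset` (see `_convert_token_to_id`), so the total size includes both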
- return len(self.sp_model) + self.offset
- def get_vocab(self) -> Dict[str, int]:
- vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
- vocab.update(self.added_tokens_encoder)
- return vocab
- def __getstate__(self):
- state = self.__dict__.copy()
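- # the SentencePieceProcessor wraps a C++ object that cannot be pickled; drop it here and reload it in __setstate__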
- state["sp_model"] = None
- return state
- def __setstate__(self, d):
- self.__dict__ = d
- # for backward compatibility
- if not hasattr(self, "sp_model_kwargs"):
- self.sp_model_kwargs = {}
- self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- self.sp_model.Load(self.vocab_file)
- def _tokenize(self, text: str) -> List[str]:
- """Take as input a string and return a list of strings (tokens) for words/sub-words"""
- return self.sp_model.encode(text, out_type=str)
- def _convert_token_to_id(self, token: str) -> int:
- """Converts a token (str) to an id using the vocab."""
- sp_id = self.sp_model.piece_to_id(token)
- return sp_id + self.offset
- def _convert_id_to_token(self, index: int) -> str:
- """Converts an index (integer) to a token (str) using the vocab."""
- if index < self.offset:
- return self.sp_model.IdToPiece(index)
- token = self.sp_model.IdToPiece(index - self.offset)
- return token
- def convert_tokens_to_string(self, tokens):
- """Converts a sequence of tokens (string) in a single string."""
- current_sub_tokens = []
- out_string = ""
- for token in tokens:
- # make sure that special tokens are not decoded using sentencepiece model
- if token in self.all_special_tokens:
- out_string += self.sp_model.decode(current_sub_tokens) + token
- current_sub_tokens = []
- else:
- current_sub_tokens.append(token)
- out_string += self.sp_model.decode(current_sub_tokens)
- return out_string.strip()
- def num_special_tokens_to_add(self, pair=False):
- """Just EOS"""
- return 1
- def _special_token_mask(self, seq):
- all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
- all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
- return [1 if x in all_special_ids else 0 for x in seq]
- def get_special_tokens_mask(
- self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
- ) -> List[int]:
- """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
- if already_has_special_tokens:
- return self._special_token_mask(token_ids_0)
- elif token_ids_1 is None:
- return self._special_token_mask(token_ids_0) + [1]
- else:
- return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
- def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
- """
- Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
- and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:
- - single sequence: `X </s>`
- - pair of sequences: `A B </s>` (not intended use)
- BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
- separator.
- Args:
- token_ids_0 (`List[int]`):
- List of IDs to which the special tokens will be added.
- token_ids_1 (`List[int]`, *optional*):
- Optional second list of IDs for sequence pairs.
- Returns:
- `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
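- For instance (ids are purely illustrative), `build_inputs_with_special_tokens([5, 6])` returns
- `[5, 6, eos_token_id]` and `build_inputs_with_special_tokens([5, 6], [7, 8])` returns
- `[5, 6, 7, 8, eos_token_id]`.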
- """
- if token_ids_1 is None:
- return token_ids_0 + [self.eos_token_id]
- # We don't expect to process pairs, but leave the pair logic for API consistency
- return token_ids_0 + token_ids_1 + [self.eos_token_id]
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
- if not os.path.isdir(save_directory):
- logger.error(f"Vocabulary path ({save_directory}) should be a directory")
- return
- out_vocab_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
- )
- if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
- copyfile(self.vocab_file, out_vocab_file)
- elif not os.path.isfile(self.vocab_file):
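- # no vocab file exists on disk (e.g. the model was loaded from a serialized proto), so write the serialized sentencepiece model instead of copying a file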
- with open(out_vocab_file, "wb") as fi:
- content_spiece_model = self.sp_model.serialized_model_proto()
- fi.write(content_spiece_model)
- return (out_vocab_file,)