tokenization_bart_fast.py

# coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}


# See all BART models at https://huggingface.co/models?filter=bart
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BART tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word
    will be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:

    ```python
    >>> from transformers import BartTokenizerFast

    >>> tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    >>> tokenizer("Hello world")["input_ids"]
    [0, 31414, 232, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [0, 20920, 232, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
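
    For example, a minimal sketch (assuming the `facebook/bart-base` checkpoint; the ids mirror the ` Hello world`
    example above, since the prefix space makes the two inputs equivalent):

    ```python
    >>> tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
    >>> tokenizer("Hello world")["input_ids"]
    [0, 20920, 232, 2]
    ```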

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>
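
    A minimal sketch of pretokenized input under that setting (assuming the `facebook/bart-base` checkpoint):

    ```python
    >>> tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
    >>> encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
    ```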

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The BART tokenizer detects the beginning of words by the preceding space.)
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post-processing step should trim offsets to avoid including whitespaces.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        # We have to specify that this token is special, otherwise adding it will reset the
        # normalized flag to `False` in `add_special_tokens`.
        mask_token = (
            AddedToken(mask_token, lstrip=True, normalized=True, special=True)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
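
        # The ByteLevel pre-tokenizer serialized in `tokenizer_file` may have been saved with a different
        # `add_prefix_space`; if so, rebuild it with the value requested here.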
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer has already been updated above; the post-processor still needs the same treatment
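        # Rebuild the backend post-processor from its serialized state only when `add_prefix_space` or
        # `trim_offsets` differs from what was saved.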
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Logs an error if used while
        not having been set.

        BART tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
        comprise the space before the *<mask>*.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """
        Overriding the default behavior of the mask token to have it eat the space before it.

        This is needed to preserve backward compatibility with all the previously used models based on BART.
        """
        # The mask token behaves like a normal word, i.e. it includes the space before it,
        # so we set lstrip to True.
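        # e.g. in "Hello <mask>", the mask token absorbs the preceding space when tokenizing.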
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
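        # Build model inputs by adding special tokens:
        # - single sequence:   `<s> token_ids_0 </s>`
        # - pair of sequences: `<s> token_ids_0 </s></s> token_ids_1 </s>`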
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
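

# A minimal usage sketch, assuming the `facebook/bart-base` checkpoint (where `bos_token_id == 0` and
# `eos_token_id == 2`, as in the docstring example above):
#
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     ids_a = tokenizer("Hello world", add_special_tokens=False)["input_ids"]
#     ids_b = tokenizer(" Nice day", add_special_tokens=False)["input_ids"]
#     tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#     # -> [0] + ids_a + [2, 2] + ids_b + [2], i.e. `<s> A </s></s> B </s>`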