# tokenization_gpt2_fast.py
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
import json
from typing import Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
logger = logging.get_logger(__name__)

# Canonical file names used when loading/saving the tokenizer's assets:
# the BPE vocabulary, the merge rules, and the serialized fast-tokenizer state.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
  25. class GPT2TokenizerFast(PreTrainedTokenizerFast):
  26. """
  27. Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
  28. Byte-Pair-Encoding.
  29. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
  30. be encoded differently whether it is at the beginning of the sentence (without space) or not:
  31. ```python
  32. >>> from transformers import GPT2TokenizerFast
  33. >>> tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")
  34. >>> tokenizer("Hello world")["input_ids"]
  35. [15496, 995]
  36. >>> tokenizer(" Hello world")["input_ids"]
  37. [18435, 995]
  38. ```
  39. You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
  40. the model was not pretrained this way, it might yield a decrease in performance.
  41. <Tip>
  42. When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
  43. </Tip>
  44. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
  45. refer to this superclass for more information regarding those methods.
  46. Args:
  47. vocab_file (`str`, *optional*):
  48. Path to the vocabulary file.
  49. merges_file (`str`, *optional*):
  50. Path to the merges file.
  51. tokenizer_file (`str`, *optional*):
  52. Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
  53. contains everything needed to load the tokenizer.
  54. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
  55. The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
  56. token instead.
  57. bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
  58. The beginning of sequence token.
  59. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
  60. The end of sequence token.
  61. add_prefix_space (`bool`, *optional*, defaults to `False`):
  62. Whether or not to add an initial space to the input. This allows to treat the leading word just as any
  63. other word. (GPT2 tokenizer detect beginning of words by the preceding space).
  64. """
  65. vocab_files_names = VOCAB_FILES_NAMES
  66. model_input_names = ["input_ids", "attention_mask"]
  67. slow_tokenizer_class = GPT2Tokenizer
  68. def __init__(
  69. self,
  70. vocab_file=None,
  71. merges_file=None,
  72. tokenizer_file=None,
  73. unk_token="<|endoftext|>",
  74. bos_token="<|endoftext|>",
  75. eos_token="<|endoftext|>",
  76. add_prefix_space=False,
  77. **kwargs,
  78. ):
  79. super().__init__(
  80. vocab_file=vocab_file,
  81. merges_file=merges_file,
  82. tokenizer_file=tokenizer_file,
  83. unk_token=unk_token,
  84. bos_token=bos_token,
  85. eos_token=eos_token,
  86. add_prefix_space=add_prefix_space,
  87. **kwargs,
  88. )
  89. self.add_bos_token = kwargs.pop("add_bos_token", False)
  90. pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
  91. if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
  92. pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
  93. pre_tok_state["add_prefix_space"] = add_prefix_space
  94. self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
  95. self.add_prefix_space = add_prefix_space
  96. def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
  97. is_split_into_words = kwargs.get("is_split_into_words", False)
  98. assert self.add_prefix_space or not is_split_into_words, (
  99. f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
  100. "to use it with pretokenized inputs."
  101. )
  102. return super()._batch_encode_plus(*args, **kwargs)
  103. def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
  104. is_split_into_words = kwargs.get("is_split_into_words", False)
  105. assert self.add_prefix_space or not is_split_into_words, (
  106. f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
  107. "to use it with pretokenized inputs."
  108. )
  109. return super()._encode_plus(*args, **kwargs)
  110. def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
  111. files = self._tokenizer.model.save(save_directory, name=filename_prefix)
  112. return tuple(files)