tokenization_gemma_fast.py

# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import Optional, Tuple

from tokenizers import processors

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
from ...utils.versions import require_version


require_version("tokenizers>=0.13.3")

if is_sentencepiece_available():
    from .tokenization_gemma import GemmaTokenizer
else:
    GemmaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}


class GemmaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a fast Gemma tokenizer, based on byte-level Byte-Pair-Encoding. It notably uses ByteFallback and no
    prefix space; normalization is applied to replace `" "` with `"▁"`.

    ```python
    >>> from transformers import GemmaTokenizerFast

    >>> tokenizer = GemmaTokenizerFast.from_pretrained("hf-internal-testing/dummy-gemma")
    >>> tokenizer.encode("Hello this is a test")
    [2, 4521, 736, 603, 476, 2121]
    ```

    If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the
    tokenizer, or call `tokenizer.update_post_processor()` afterwards to make sure the post-processing is done
    correctly (otherwise the values of the first token and final token of an encoded sequence will not be correct).
    For more details, check out the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors)
    documentation.
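
    For example, to switch `eos` insertion on after the tokenizer has been created (a minimal sketch; the property
    setter re-runs `update_post_processor()` for you):

    ```python
    >>> tokenizer = GemmaTokenizerFast.from_pretrained("hf-internal-testing/dummy-gemma")
    >>> tokenizer.add_eos_token = True  # rebuilds the post-processor, so encoded sequences now end with the eos id
    ```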

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        tokenizer_file (`str`, *optional*):
            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to clean up spaces after decoding; the cleanup consists in removing potential artifacts
            like extra spaces.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The padding token.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add a `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = GemmaTokenizer
    padding_side = "left"
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        clean_up_tokenization_spaces=False,
        unk_token="<unk>",
        bos_token="<bos>",
        eos_token="<eos>",
        pad_token="<pad>",
        add_bos_token=True,
        add_eos_token=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            **kwargs,
        )
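        # Keep local copies of the flags and rebuild the backend post-processor so that the requested
        # `bos`/`eos` insertion is actually reflected in encoded sequences.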
        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        """
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError("add_bos_token = True but bos_token = None")

        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError("add_eos_token = True but eos_token = None")

        single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
        pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"

        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=single, pair=pair, special_tokens=special_tokens
        )
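
    # Illustration of the templates built above with the defaults (add_bos_token=True, add_eos_token=False):
    #   single = "<bos>:0 $A:0"
    #   pair   = "<bos>:0 $A:0 <bos>:1 $B:1"
    # i.e. a single sequence is prefixed with the bos id, and a pair repeats it before the second segment.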

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

    # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
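
    # Usage illustration (hypothetical output directory): `tokenizer.save_vocabulary("./out")` copies the original
    # SentencePiece file to ./out/tokenizer.model and returns ("./out/tokenizer.model",).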

    # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
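        # Illustration with the defaults (add_bos_token=True, add_eos_token=False):
        #   build_inputs_with_special_tokens([10, 11])        -> [bos_token_id, 10, 11]
        #   build_inputs_with_special_tokens([10, 11], [12])  -> [bos_token_id, 10, 11, bos_token_id, 12]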
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output