tokenization_perceiver.py

# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Perceiver."""

from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


class PerceiverTokenizer(PreTrainedTokenizer):
    """
    Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        bos_token (`str`, *optional*, defaults to `"[BOS]"`):
            The BOS token (reserved in the vocab, but not actually used).
        eos_token (`str`, *optional*, defaults to `"[EOS]"`):
            The end of sequence token (reserved in the vocab, but not actually used).

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The MASK token, useful for masked language modeling.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The CLS token (reserved in the vocab, but not actually used).
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from two sequences.
    """
  43. model_input_names = ["input_ids", "attention_mask"]
  44. def __init__(
  45. self,
  46. pad_token="[PAD]",
  47. bos_token="[BOS]",
  48. eos_token="[EOS]",
  49. mask_token="[MASK]",
  50. cls_token="[CLS]",
  51. sep_token="[SEP]",
  52. model_max_length=2048,
  53. **kwargs,
  54. ) -> None:
  55. pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
  56. bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
  57. eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
  58. mask_token = AddedToken(mask_token, lstrip=False, rstrip=False) if isinstance(mask_token, str) else mask_token
  59. cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
  60. sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
  61. self._utf_vocab_size = 2**8 # utf is 8 bits
  62. # Since these tokens are not part of the vocabulary, we manually add them
  63. self._added_tokens_decoder: Dict[str, int] = {
  64. 0: pad_token,
  65. 1: bos_token,
  66. 2: eos_token,
  67. 3: mask_token,
  68. 4: cls_token,
  69. 5: sep_token,
  70. }
  71. self._num_special_tokens = len(self._added_tokens_decoder)
  72. super().__init__(
  73. pad_token=pad_token,
  74. bos_token=bos_token,
  75. eos_token=eos_token,
  76. mask_token=mask_token,
  77. cls_token=cls_token,
  78. sep_token=sep_token,
  79. model_max_length=model_max_length,
  80. **kwargs,
  81. )
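
    # Sketch of the id layout produced by the constructor above: ids 0-5 are the six special
    # tokens added at init time, and the 256 possible byte values occupy ids 6-261.
    #
    #     tokenizer = PerceiverTokenizer()
    #     tokenizer.pad_token_id, tokenizer.cls_token_id, tokenizer.sep_token_id  # -> (0, 4, 5)
    #     tokenizer.vocab_size                                                    # -> 256 (bytes only)
    #     len(tokenizer.get_vocab())                                              # -> 262 (bytes + specials)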

    def get_vocab(self) -> Dict[str, int]:
        vocab = {}
        for i in range(self._utf_vocab_size):
            token = chr(i)
            vocab[token] = i + self._num_special_tokens
        vocab.update(self.added_tokens_encoder)
        return vocab

    @property
    def vocab_size(self):
        return self._utf_vocab_size

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return [1] + [0] * len(token_ids_0) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
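
    # Illustrative output of the mask built above (1 marks a special-token position):
    #
    #     tokenizer.get_special_tokens_mask([110, 111])          # -> [1, 0, 0, 1]
    #     tokenizer.get_special_tokens_mask([110, 111], [112])   # -> [1, 0, 0, 1, 0, 1]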

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks. A sequence has
        the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        else:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]
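
    # Example of the layout documented above, with the default ids ([CLS] = 4, [SEP] = 5):
    #
    #     tokenizer.build_inputs_with_special_tokens([110, 111])          # -> [4, 110, 111, 5]
    #     tokenizer.build_inputs_with_special_tokens([110, 111], [112])   # -> [4, 110, 111, 5, 112, 5]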

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        token = chr(index - self._num_special_tokens)
        return token
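
    # Round-trip sketch for the three helpers above: text is split into one pseudo-character per
    # UTF-8 byte, and each byte is shifted by the number of reserved special tokens (6).
    #
    #     tokenizer._tokenize("hé")             # -> ['h', 'Ã', '©']  (bytes 0x68, 0xC3, 0xA9)
    #     tokenizer._convert_token_to_id("h")   # -> 110  (ord("h") + 6)
    #     tokenizer._convert_id_to_token(110)   # -> 'h'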

    # TODO @ArthurZ refactor this as well....
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.added_tokens_encoder:
                tok_string = str(token).encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="replace")
        return string
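
    # Sketch of the byte reassembly above: single-character byte tokens are packed back into a
    # bytes object and decoded as UTF-8, while added special tokens are emitted literally.
    #
    #     tokenizer.convert_tokens_to_string(['h', 'Ã', '©'])       # -> "hé"
    #     tokenizer.convert_tokens_to_string(['[CLS]', 'h', 'i'])   # -> "[CLS]hi"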

    # PerceiverTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()