# tokenization_cpm.py
  1. # coding=utf-8
  2. # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """Tokenization classes."""
  16. import os
  17. import unicodedata
  18. from shutil import copyfile
  19. from typing import Any, Dict, List, Optional, Tuple
  20. import sentencepiece as spm
  21. from ...tokenization_utils import AddedToken, PreTrainedTokenizer
  22. from ...utils import SPIECE_UNDERLINE, logging
# Module-level logger, namespaced to this file.
logger = logging.get_logger(__name__)

# Name of the SentencePiece model file expected inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
  25. class CpmTokenizer(PreTrainedTokenizer):
  26. """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
  27. vocab_files_names = VOCAB_FILES_NAMES
  28. def __init__(
  29. self,
  30. vocab_file,
  31. do_lower_case=False,
  32. remove_space=True,
  33. keep_accents=False,
  34. bos_token="<s>",
  35. eos_token="</s>",
  36. unk_token="<unk>",
  37. sep_token="<sep>",
  38. pad_token="<pad>",
  39. cls_token="<cls>",
  40. mask_token="<mask>",
  41. additional_special_tokens=["<eop>", "<eod>"],
  42. sp_model_kwargs: Optional[Dict[str, Any]] = None,
  43. **kwargs,
  44. ) -> None:
  45. """
  46. Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
  47. [SentencePiece](https://github.com/google/sentencepiece).
  48. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
  49. refer to this superclass for more information regarding those methods.
  50. Args:
  51. vocab_file (`str`):
  52. [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
  53. contains the vocabulary necessary to instantiate a tokenizer.
  54. do_lower_case (`bool`, *optional*, defaults to `True`):
  55. Whether to lowercase the input when tokenizing.
  56. remove_space (`bool`, *optional*, defaults to `True`):
  57. Whether to strip the text when tokenizing (removing excess spaces before and after the string).
  58. keep_accents (`bool`, *optional*, defaults to `False`):
  59. Whether to keep accents when tokenizing.
  60. bos_token (`str`, *optional*, defaults to `"<s>"`):
  61. The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
  62. token.
  63. <Tip>
  64. When building a sequence using special tokens, this is not the token that is used for the beginning of
  65. sequence. The token used is the `cls_token`.
  66. </Tip>
  67. eos_token (`str`, *optional*, defaults to `"</s>"`):
  68. The end of sequence token.
  69. <Tip>
  70. When building a sequence using special tokens, this is not the token that is used for the end of
  71. sequence. The token used is the `sep_token`.
  72. </Tip>
  73. unk_token (`str`, *optional*, defaults to `"<unk>"`):
  74. The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
  75. this token instead.
  76. sep_token (`str`, *optional*, defaults to `"<sep>"`):
  77. The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
  78. for sequence classification or for a text and a question for question answering. It is also used as the
  79. last token of a sequence built with special tokens.
  80. pad_token (`str`, *optional*, defaults to `"<pad>"`):
  81. The token used for padding, for example when batching sequences of different lengths.
  82. cls_token (`str`, *optional*, defaults to `"<cls>"`):
  83. The classifier token which is used when doing sequence classification (classification of the whole
  84. sequence instead of per-token classification). It is the first token of the sequence when built with
  85. special tokens.
  86. mask_token (`str`, *optional*, defaults to `"<mask>"`):
  87. The token used for masking values. This is the token used when training this model with masked language
  88. modeling. This is the token which the model will try to predict.
  89. additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
  90. Additional special tokens used by the tokenizer.
  91. Attributes:
  92. sp_model (`SentencePieceProcessor`):
  93. The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
  94. """
  95. # Mask token behave like a normal word, i.e. include the space before it
  96. mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
  97. self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
  98. self.do_lower_case = do_lower_case
  99. self.remove_space = remove_space
  100. self.keep_accents = keep_accents
  101. self.vocab_file = vocab_file
  102. self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
  103. self.sp_model.Load(vocab_file)
  104. try:
  105. import jieba
  106. except ModuleNotFoundError as error:
  107. raise error.__class__(
  108. "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
  109. "See https://pypi.org/project/jieba/ for installation."
  110. )
  111. self.jieba = jieba
  112. self.translator = str.maketrans(" \n", "\u2582\u2583")
  113. super().__init__(
  114. do_lower_case=do_lower_case,
  115. remove_space=remove_space,
  116. keep_accents=keep_accents,
  117. bos_token=bos_token,
  118. eos_token=eos_token,
  119. unk_token=unk_token,
  120. sep_token=sep_token,
  121. pad_token=pad_token,
  122. cls_token=cls_token,
  123. mask_token=mask_token,
  124. additional_special_tokens=additional_special_tokens,
  125. sp_model_kwargs=self.sp_model_kwargs,
  126. **kwargs,
  127. )
  128. self._pad_token_type_id = 3
  129. @property
  130. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
  131. def vocab_size(self):
  132. return len(self.sp_model)
  133. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab
  134. def get_vocab(self):
  135. vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
  136. vocab.update(self.added_tokens_encoder)
  137. return vocab
  138. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__
  139. def __getstate__(self):
  140. state = self.__dict__.copy()
  141. state["sp_model"] = None
  142. return state
  143. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__
  144. def __setstate__(self, d):
  145. self.__dict__ = d
  146. # for backward compatibility
  147. if not hasattr(self, "sp_model_kwargs"):
  148. self.sp_model_kwargs = {}
  149. self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
  150. self.sp_model.Load(self.vocab_file)
  151. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text
  152. def preprocess_text(self, inputs):
  153. if self.remove_space:
  154. outputs = " ".join(inputs.strip().split())
  155. else:
  156. outputs = inputs
  157. outputs = outputs.replace("``", '"').replace("''", '"')
  158. if not self.keep_accents:
  159. outputs = unicodedata.normalize("NFKD", outputs)
  160. outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
  161. if self.do_lower_case:
  162. outputs = outputs.lower()
  163. return outputs
  164. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._tokenize
  165. def _tokenize(self, text: str) -> List[str]:
  166. """Tokenize a string."""
  167. text = self.preprocess_text(text)
  168. pieces = self.sp_model.encode(text, out_type=str)
  169. new_pieces = []
  170. for piece in pieces:
  171. if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
  172. cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
  173. if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
  174. if len(cur_pieces[0]) == 1:
  175. cur_pieces = cur_pieces[1:]
  176. else:
  177. cur_pieces[0] = cur_pieces[0][1:]
  178. cur_pieces.append(piece[-1])
  179. new_pieces.extend(cur_pieces)
  180. else:
  181. new_pieces.append(piece)
  182. return new_pieces
  183. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_token_to_id
  184. def _convert_token_to_id(self, token):
  185. """Converts a token (str) in an id using the vocab."""
  186. return self.sp_model.PieceToId(token)
  187. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_id_to_token
  188. def _convert_id_to_token(self, index):
  189. """Converts an index (integer) in a token (str) using the vocab."""
  190. return self.sp_model.IdToPiece(index)
  191. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string
  192. def convert_tokens_to_string(self, tokens):
  193. """Converts a sequence of tokens (strings for sub-words) in a single string."""
  194. out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
  195. return out_string
  196. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens
  197. def build_inputs_with_special_tokens(
  198. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
  199. ) -> List[int]:
  200. """
  201. Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
  202. adding special tokens. An XLNet sequence has the following format:
  203. - single sequence: `X <sep> <cls>`
  204. - pair of sequences: `A <sep> B <sep> <cls>`
  205. Args:
  206. token_ids_0 (`List[int]`):
  207. List of IDs to which the special tokens will be added.
  208. token_ids_1 (`List[int]`, *optional*):
  209. Optional second list of IDs for sequence pairs.
  210. Returns:
  211. `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
  212. """
  213. sep = [self.sep_token_id]
  214. cls = [self.cls_token_id]
  215. if token_ids_1 is None:
  216. return token_ids_0 + sep + cls
  217. return token_ids_0 + sep + token_ids_1 + sep + cls
  218. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask
  219. def get_special_tokens_mask(
  220. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
  221. ) -> List[int]:
  222. """
  223. Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
  224. special tokens using the tokenizer `prepare_for_model` method.
  225. Args:
  226. token_ids_0 (`List[int]`):
  227. List of IDs.
  228. token_ids_1 (`List[int]`, *optional*):
  229. Optional second list of IDs for sequence pairs.
  230. already_has_special_tokens (`bool`, *optional*, defaults to `False`):
  231. Whether or not the token list is already formatted with special tokens for the model.
  232. Returns:
  233. `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
  234. """
  235. if already_has_special_tokens:
  236. return super().get_special_tokens_mask(
  237. token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
  238. )
  239. if token_ids_1 is not None:
  240. return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
  241. return ([0] * len(token_ids_0)) + [1, 1]
  242. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences
  243. def create_token_type_ids_from_sequences(
  244. self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
  245. ) -> List[int]:
  246. """
  247. Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
  248. sequence pair mask has the following format:
  249. ```
  250. 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
  251. | first sequence | second sequence |
  252. ```
  253. If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
  254. Args:
  255. token_ids_0 (`List[int]`):
  256. List of IDs.
  257. token_ids_1 (`List[int]`, *optional*):
  258. Optional second list of IDs for sequence pairs.
  259. Returns:
  260. `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
  261. """
  262. sep = [self.sep_token_id]
  263. cls_segment_id = [2]
  264. if token_ids_1 is None:
  265. return len(token_ids_0 + sep) * [0] + cls_segment_id
  266. return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
  267. # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary
  268. def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
  269. if not os.path.isdir(save_directory):
  270. logger.error(f"Vocabulary path ({save_directory}) should be a directory")
  271. return
  272. out_vocab_file = os.path.join(
  273. save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
  274. )
  275. if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
  276. copyfile(self.vocab_file, out_vocab_file)
  277. elif not os.path.isfile(self.vocab_file):
  278. with open(out_vocab_file, "wb") as fi:
  279. content_spiece_model = self.sp_model.serialized_model_proto()
  280. fi.write(content_spiece_model)
  281. return (out_vocab_file,)
  282. def _decode(self, *args, **kwargs):
  283. text = super()._decode(*args, **kwargs)
  284. text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
  285. return text