# coding=utf-8
# Copyright 2022, UCLA NLP, The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

FAIRSEQ_LANGUAGE_CODES = {
    "base": ["__java__", "__python__", "__en_XX__"],
    "multi": ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
}

FAIRSEQ_LANGUAGE_CODES_MAP = {
    "java": "__java__",
    "python": "__python__",
    "en_XX": "__en_XX__",
    "javascript": "__javascript__",
    "php": "__php__",
    "ruby": "__ruby__",
    "go": "__go__",
}


class PLBartTokenizer(PreTrainedTokenizer):
    """
    Construct a PLBART tokenizer.

    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        src_lang (`str`, *optional*):
            A string representing the source language.
        tgt_lang (`str`, *optional*):
            A string representing the target language.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The start of sequence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The cls token, which is a special token used as the first token for all tasks.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masking tasks.
            It is only used in the `"base"` tokenizer type. For the `"multi"` tokenizer, masking is never done for
            the downstream tasks.
        language_codes (`str`, *optional*, defaults to `"base"`):
            What language codes to use. Should be one of `"base"` or `"multi"`.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Examples:

    ```python
    >>> from transformers import PLBartTokenizer

    >>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
    >>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
    >>> expected_translation_english = "Returns the maximum value of a b c."
    >>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
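    >>> # Hypothetical sketch (not part of the original example): enabling SentencePiece subword
    >>> # regularization through `sp_model_kwargs`, assuming a local "sentencepiece.bpe.model" file.
    >>> sampled_tokenizer = PLBartTokenizer(
    ...     "sentencepiece.bpe.model",
    ...     sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
    ... )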
    ```"""

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        language_codes="base",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        clean_up_tokenization_spaces=True,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        src_lang = self._convert_lang_code_special_format(src_lang)
        tgt_lang = self._convert_lang_code_special_format(tgt_lang)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        self.language_codes = language_codes
        fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
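        # e.g. per the table above, sp_model.PieceToId(",") == 3 and 3 + self.fairseq_offset == 4,
        # which is the fairseq id of ","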
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}

        if self.language_codes == "base":
            self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        _additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        if self.language_codes == "base":
            self._src_lang = src_lang
            self.cur_lang_code_id = (
                self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang
            )
        else:
            self._src_lang = src_lang if src_lang is not None else "__en_XX__"
            self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            language_codes=language_codes,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=_additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
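
    # Pickling support: the SentencePieceProcessor is a C-extension object that cannot be pickled
    # directly, so __getstate__ drops it and stores its serialized model proto instead, while
    # __setstate__ rebuilds the processor from that proto.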
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        if self.language_codes == "base":
            return (
                len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1
            )  # Plus 1 for the mask token
        else:
            return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        new_src_lang = self._convert_lang_code_special_format(new_src_lang)
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A PLBART sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids` (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = self._convert_lang_code_special_format(src_lang)
        self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "python",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = self._convert_lang_code_special_format(src_lang)
        self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        src_lang = self._convert_lang_code_special_format(src_lang)
        self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None
        self.prefix_tokens = []
        if self.cur_lang_code is not None:
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        lang = self._convert_lang_code_special_format(lang)
        self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None
        self.prefix_tokens = []
        if self.cur_lang_code is not None:
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.suffix_tokens = [self.eos_token_id]

    def _convert_lang_code_special_format(self, lang: str) -> str:
        """Convert a language code to the format the tokenizer uses, if required."""
        lang = FAIRSEQ_LANGUAGE_CODES_MAP[lang] if lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys() else lang
        return lang