# tokenization_nllb.py
  1. # coding=utf-8
  2. # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import os
  16. from shutil import copyfile
  17. from typing import Any, Dict, List, Optional, Tuple
  18. import sentencepiece as spm
  19. from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
  20. from ...utils import logging
logger = logging.get_logger(__name__)

# The SentencePiece "meta symbol" that marks a word boundary (U+2581); stripped
# back to a plain space when detokenizing in `convert_tokens_to_string`.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

# The 202 NLLB-200 language codes (`<lang>_<Script>` in fairseq order). Used as the
# default `additional_special_tokens`, so each code gets its own vocabulary id.
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']  # fmt: skip
class NllbTokenizer(PreTrainedTokenizer):
    """
    Construct an NLLB tokenizer.

    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Examples:

    ```python
    >>> from transformers import NllbTokenizer

    >>> tokenizer = NllbTokenizer.from_pretrained(
    ...     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    ... )
    >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
    >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
    ```

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenizer_file (`str`, *optional*):
            The path to a tokenizer file to use instead of the vocab file.
        src_lang (`str`, *optional*):
            The language to use as source language for translation.
        tgt_lang (`str`, *optional*):
            The language to use as target language for translation.
        sp_model_kwargs (`Dict[str, str]`):
            Additional keyword arguments to pass to the model initialization.
        additional_special_tokens (`List[str]`, *optional*):
            Extra special tokens to register. Defaults to `FAIRSEQ_LANGUAGE_CODES` (all NLLB language codes).
        legacy_behaviour (`bool`, *optional*, defaults to `False`):
            If `True`, emit special tokens in the legacy order (no prefix, suffix `[eos, lang_code]`) instead of the
            default (prefix `[lang_code]`, suffix `[eos]`). See `set_src_lang_special_tokens`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    # Language-dependent special tokens wrapped around each encoded sequence;
    # repopulated by `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        if additional_special_tokens is None:
            additional_special_tokens = FAIRSEQ_LANGUAGE_CODES

        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, normalized=True, lstrip=True, special=True)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # unk token needs to be in the vocab with correct index.
        # NOTE: must be set before super().__init__ so the base class sees these ids.
        self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The SentencePieceProcessor is a C++ object and cannot be pickled directly;
        # store its serialized protobuf instead and rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        # Rebuild the sentencepiece processor from the proto captured in __getstate__.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        # spm size plus the fairseq offset (see the alignment table in __init__).
        return len(self.sp_model) + self.fairseq_offset

    @property
    def src_lang(self) -> str:
        # Current source-language code (e.g. "eng_Latn").
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # Changing the source language immediately refreshes prefix/suffix tokens.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        # Pairs are joined with no separator (mirrors build_inputs_with_special_tokens).
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        # Generation is forced to start with the target language code.
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        """Return the full token -> id mapping, including added special tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into sentencepiece sub-word pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an id using the vocab."""
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        # Undo the fairseq offset applied in _convert_token_to_id.
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model into `save_directory`.

        Returns a 1-tuple with the written file path, or `None` if `save_directory`
        is not a directory (NOTE(review): the error path returns None despite the
        `Tuple[str]` annotation — confirm callers tolerate this).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Source file is gone (e.g. tokenizer was unpickled): dump the in-memory proto.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        """Set the language pair, then delegate batch preparation to the base class."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        # Called by the base class before encoding source text.
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        # Called by the base class before encoding target text.
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: No prefix and suffix=[eos, src_lang_code].
        - In default mode: Prefix=[src_lang_code], suffix = [eos]
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
        - In default mode: Prefix=[tgt_lang_code], suffix = [eos]
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]