tokenization_flaubert.py
  1. # coding=utf-8
  2. # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """Tokenization classes for Flaubert."""
  16. import json
  17. import os
  18. import re
  19. import unicodedata
  20. from typing import List, Optional, Tuple
  21. from ...tokenization_utils import PreTrainedTokenizer
  22. from ...utils import logging
# Module-level logger, following the transformers logging convention.
logger = logging.get_logger(__name__)

# File names under which the vocabulary and BPE merges are stored inside a
# pretrained checkpoint directory (read in `__init__`, written in `save_vocabulary`).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
  28. def convert_to_unicode(text):
  29. """
  30. Converts `text` to Unicode (if it's not already), assuming UTF-8 input.
  31. """
  32. def ensure_text(s, encoding="utf-8", errors="strict"):
  33. if isinstance(s, bytes):
  34. return s.decode(encoding, errors)
  35. elif isinstance(s, str):
  36. return s
  37. else:
  38. raise TypeError(f"not expecting type '{type(s)}'")
  39. return ensure_text(text, encoding="utf-8", errors="ignore")
  40. # Copied from transformers.models.xlm.tokenization_xlm.get_pairs
  41. def get_pairs(word):
  42. """
  43. Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
  44. strings)
  45. """
  46. pairs = set()
  47. prev_char = word[0]
  48. for char in word[1:]:
  49. pairs.add((prev_char, char))
  50. prev_char = char
  51. return pairs
  52. # Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
  53. def replace_unicode_punct(text):
  54. """
  55. Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
  56. """
  57. text = text.replace(",", ",")
  58. text = re.sub(r"。\s*", ". ", text)
  59. text = text.replace("、", ",")
  60. text = text.replace("”", '"')
  61. text = text.replace("“", '"')
  62. text = text.replace("∶", ":")
  63. text = text.replace(":", ":")
  64. text = text.replace("?", "?")
  65. text = text.replace("《", '"')
  66. text = text.replace("》", '"')
  67. text = text.replace(")", ")")
  68. text = text.replace("!", "!")
  69. text = text.replace("(", "(")
  70. text = text.replace(";", ";")
  71. text = text.replace("1", "1")
  72. text = text.replace("」", '"')
  73. text = text.replace("「", '"')
  74. text = text.replace("0", "0")
  75. text = text.replace("3", "3")
  76. text = text.replace("2", "2")
  77. text = text.replace("5", "5")
  78. text = text.replace("6", "6")
  79. text = text.replace("9", "9")
  80. text = text.replace("7", "7")
  81. text = text.replace("8", "8")
  82. text = text.replace("4", "4")
  83. text = re.sub(r".\s*", ". ", text)
  84. text = text.replace("~", "~")
  85. text = text.replace("’", "'")
  86. text = text.replace("…", "...")
  87. text = text.replace("━", "-")
  88. text = text.replace("〈", "<")
  89. text = text.replace("〉", ">")
  90. text = text.replace("【", "[")
  91. text = text.replace("】", "]")
  92. text = text.replace("%", "%")
  93. return text
  94. # Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
  95. def remove_non_printing_char(text):
  96. """
  97. Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
  98. """
  99. output = []
  100. for char in text:
  101. cat = unicodedata.category(char)
  102. if cat.startswith("C"):
  103. continue
  104. output.append(char)
  105. return "".join(output)
class FlaubertTokenizer(PreTrainedTokenizer):
    """
    Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:

    - Moses preprocessing and tokenization.
    - Normalizing all inputs text.
    - The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
      "__classify__") to a vocabulary.
    - The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Vocabulary file.
        merges_file (`str`):
            Merges file.
        do_lowercase (`bool`, *optional*, defaults to `False`):
            Controls lower casing.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"</s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"<special1>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
            List of additional special tokens.
        lang2id (`Dict[str, int]`, *optional*):
            Dictionary mapping languages string identifiers to their IDs.
        id2lang (`Dict[int, str]`, *optional*):
            Dictionary mapping language IDs to their string identifiers.
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        merges_file,
        do_lowercase=False,
        unk_token="<unk>",
        bos_token="<s>",
        sep_token="</s>",
        pad_token="<pad>",
        cls_token="</s>",
        mask_token="<special1>",
        additional_special_tokens=[
            "<special0>",
            "<special1>",
            "<special2>",
            "<special3>",
            "<special4>",
            "<special5>",
            "<special6>",
            "<special7>",
            "<special8>",
            "<special9>",
        ],
        lang2id=None,
        id2lang=None,
        **kwargs,
    ):
        # Accepted only for backward compatibility with old kwargs; it has no effect.
        do_lowercase_and_remove_accent = kwargs.pop("do_lowercase_and_remove_accent", None)
        if do_lowercase_and_remove_accent is not None:
            logger.warning(
                "`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything."
                " `FlaubertTokenizer` will always set it to `False`."
            )
        # always `False`
        self.do_lowercase_and_remove_accent = False
        self.do_lowercase = do_lowercase

        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use FlaubertTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses

        # cache of sm.MosesPunctNormalizer instance
        self.cache_moses_punct_normalizer = {}
        # cache of sm.MosesTokenizer instance
        self.cache_moses_tokenizer = {}
        # Languages whose tokenization is not handled by Moses.
        self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
        self.lang2id = lang2id
        self.id2lang = id2lang
        if lang2id is not None and id2lang is not None:
            # The two mappings must be inverses of each other, hence equal size.
            assert len(lang2id) == len(id2lang)

        # Lazily constructed in `ja_tokenize`; `zh_word_tokenizer` is kept for parity
        # with the XLM tokenizer but is never populated here.
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse mapping id -> token for decoding.
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Drop the trailing empty string produced by the final newline.
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Lower rank = higher merge priority (file order).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes `bpe(token)` results.
        self.cache = {}

        super().__init__(
            do_lowercase=do_lowercase,
            unk_token=unk_token,
            bos_token=bos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            lang2id=lang2id,
            id2lang=id2lang,
            **kwargs,
        )

    @property
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
    def do_lower_case(self):
        # NOTE(review): returns `do_lowercase_and_remove_accent`, which `__init__`
        # pins to `False` — so this property is always `False`, independent of the
        # `do_lowercase` argument. Kept as-is for XLM compatibility.
        return self.do_lowercase_and_remove_accent

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
    def moses_punct_norm(self, text, lang):
        # One MosesPunctNormalizer per language, built on first use and cached.
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
    def moses_tokenize(self, text, lang):
        # One MosesTokenizer per language, built on first use and cached.
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        # escape=False keeps characters like "&" unescaped in the output tokens.
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
    def moses_pipeline(self, text, lang):
        # Full Moses-style cleanup: punctuation mapping, normalization, control-char removal.
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
    def ja_tokenize(self, text):
        # Lazily set up the KyTea-based Japanese word tokenizer on first call.
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea

                self.ja_word_tokenizer = Mykytea.Mykytea(
                    f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
                )
            except (AttributeError, ImportError):
                logger.error(
                    "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
                    " (https://github.com/chezou/Mykytea-python) with the following steps"
                )
                logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
                logger.error("2. autoreconf -i")
                logger.error("3. ./configure --prefix=$HOME/local")
                logger.error("4. make && make install")
                logger.error("5. pip install kytea")
                raise
        return list(self.ja_word_tokenizer.getWS(text))

    @property
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
    def vocab_size(self):
        # Size of the base vocabulary (excludes added tokens).
        return len(self.encoder)

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
    def get_vocab(self):
        # Base vocabulary merged with tokens added after loading.
        return dict(self.encoder, **self.added_tokens_encoder)

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
    def bpe(self, token):
        # Apply BPE merges to a single token; returns the space-joined BPE symbols,
        # with "</w>" marking the end-of-word symbol. Results are memoized in self.cache.
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            # Single-symbol token: nothing to merge.
            return token + "</w>"
        while True:
            # Greedily pick the lowest-ranked (highest-priority) adjacent pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`: copy the tail unchanged.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Merge the bigram into one symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            word = "\n</w>"
        self.cache[token] = word
        return word

    def preprocess_text(self, text):
        """Normalize raw text before Moses processing: unify quotes, force `str`, NFC-normalize, optionally lowercase."""
        text = text.replace("``", '"').replace("''", '"')
        text = convert_to_unicode(text)
        text = unicodedata.normalize("NFC", text)

        if self.do_lowercase:
            text = text.lower()

        return text

    def _tokenize(self, text, bypass_tokenizer=False):
        """
        Tokenize a string given language code using Moses.

        Details of tokenization:

            - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
            - Install with `pip install sacremoses`

        Args:
            - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
              (bool). If True, we only apply BPE.

        Returns:
            List of tokens.
        """
        # Flaubert is a French-only model, so the language is fixed.
        lang = "fr"
        if lang and self.lang2id and lang not in self.lang2id:
            logger.error(
                "Supplied language code not found in lang2id mapping. Please check that your language is supported by"
                " the loaded pretrained model."
            )

        if bypass_tokenizer:
            # Caller already tokenized: just whitespace-split before BPE.
            text = text.split()
        else:
            text = self.preprocess_text(text)
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # "</w>" end-of-word markers become the spaces between words.
        out_string = "".join(tokens).replace("</w>", " ").strip()
        return out_string

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An XLM sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        bos = [self.bos_token_id]
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return bos + token_ids_0 + sep
        return bos + token_ids_0 + sep + token_ids_1 + sep

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Writes vocab.json and merges.txt into `save_directory`; returns their paths,
        # or None (with an error log) when the directory does not exist.
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            # Write merges in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
    def __getstate__(self):
        # The sacremoses module object cannot be pickled; drop it and re-import on load.
        state = self.__dict__.copy()
        state["sm"] = None
        return state

    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
    def __setstate__(self, d):
        self.__dict__ = d

        # Restore the sacremoses handle that __getstate__ removed.
        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use XLMTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses