base_tokenizer.py

from typing import Dict, List, Optional, Tuple, Union

from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import Model
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
from tokenizers.processors import PostProcessor

Offsets = Tuple[int, int]


class BaseTokenizer:
    def __init__(self, tokenizer: Tokenizer, parameters=None):
        self._tokenizer = tokenizer
        self._parameters = parameters if parameters is not None else {}

    def __repr__(self):
        return "Tokenizer(vocabulary_size={}, {})".format(
            self._tokenizer.get_vocab_size(),
            ", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
        )

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.

        :param is_pair: Boolean indicating whether the input would be a pair of sequences (True)
            or a single sequence (False)
        :return: The number of special tokens that would be added
        """
        return self._tokenizer.num_special_tokens_to_add(is_pair)

    def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
        """Returns the vocabulary

        Args:
            with_added_tokens: boolean:
                Whether to include the added tokens in the vocabulary

        Returns:
            The vocabulary
        """
        return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)

    def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
        """Returns the added reverse vocabulary

        Returns:
            The added vocabulary, mapping ids (int) to AddedToken
        """
        return self._tokenizer.get_added_tokens_decoder()

    def get_vocab_size(self, with_added_tokens: bool = True) -> int:
        """Return the size of the vocabulary, with or without added tokens.

        Args:
            with_added_tokens: (`optional`) bool:
                Whether to include the added tokens in the count

        Returns:
            Size of the vocabulary
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
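
    # Usage sketch (illustrative, not part of the original module). In the sketches
    # below, `wrapped` is assumed to be a BaseTokenizer built around an already
    # trained `tokenizers.Tokenizer`:
    #
    #     wrapped.get_vocab_size()                    # e.g. 30000
    #     wrapped.get_vocab(with_added_tokens=True)   # {"hello": 2, ...}
    #     wrapped.get_added_tokens_decoder()          # {30000: AddedToken("[NEW]"), ...}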

    def enable_padding(
        self,
        direction: Optional[str] = "right",
        pad_to_multiple_of: Optional[int] = None,
        pad_id: Optional[int] = 0,
        pad_type_id: Optional[int] = 0,
        pad_token: Optional[str] = "[PAD]",
        length: Optional[int] = None,
    ):
        """Change the padding strategy

        Args:
            direction: (`optional`) str:
                Can be one of: `right` or `left`
            pad_to_multiple_of: (`optional`) unsigned int:
                If specified, the padding length should always snap to the next multiple of
                the given value. For example, if we were going to pad with a length of 250 but
                `pad_to_multiple_of=8`, then we will pad to 256.
            pad_id: (`optional`) unsigned int:
                The index to be used when padding
            pad_type_id: (`optional`) unsigned int:
                The type index to be used when padding
            pad_token: (`optional`) str:
                The pad token to be used when padding
            length: (`optional`) unsigned int:
                If specified, the length at which to pad. If not specified,
                we pad using the size of the longest sequence in the batch.
        """
        return self._tokenizer.enable_padding(
            direction=direction,
            pad_to_multiple_of=pad_to_multiple_of,
            pad_id=pad_id,
            pad_type_id=pad_type_id,
            pad_token=pad_token,
            length=length,
        )
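
    # Usage sketch (illustrative): pad every batch to a multiple of 8 with a
    # hypothetical "[PAD]" token of id 0, then turn padding back off.
    #
    #     wrapped.enable_padding(pad_id=0, pad_token="[PAD]", pad_to_multiple_of=8)
    #     wrapped.padding       # dict of the parameters set above
    #     wrapped.no_padding()
    #     wrapped.padding       # None once padding is disabled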

    def no_padding(self):
        """Disable padding"""
        return self._tokenizer.no_padding()

    @property
    def padding(self) -> Optional[dict]:
        """Get the current padding parameters

        Returns:
            None if padding is disabled, a dict with the currently set parameters
            if the padding is enabled.
        """
        return self._tokenizer.padding

    def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
        """Change the truncation options

        Args:
            max_length: unsigned int:
                The maximum length at which to truncate
            stride: (`optional`) unsigned int:
                The length of the previous first sequence to be included
                in the overflowing sequence
            strategy: (`optional`) str:
                Can be one of `longest_first`, `only_first` or `only_second`
        """
        return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
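
    # Usage sketch (illustrative): truncate to 512 tokens, keeping a 128-token
    # overlap in the overflowing pieces.
    #
    #     wrapped.enable_truncation(max_length=512, stride=128, strategy="longest_first")
    #     wrapped.truncation    # {"max_length": 512, "stride": 128, ...}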

    def no_truncation(self):
        """Disable truncation"""
        return self._tokenizer.no_truncation()

    @property
    def truncation(self) -> Optional[dict]:
        """Get the current truncation parameters

        Returns:
            None if truncation is disabled, a dict with the current truncation parameters if
            truncation is enabled
        """
        return self._tokenizer.truncation

    def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
        """Add the given tokens to the vocabulary

        Args:
            tokens: List[Union[str, AddedToken]]:
                A list of tokens to add to the vocabulary. Each token can either be
                a string, or an instance of AddedToken

        Returns:
            The number of tokens that were added to the vocabulary
        """
        return self._tokenizer.add_tokens(tokens)

    def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
        """Add the given special tokens to the vocabulary, and treat them as special tokens.

        The special tokens will never be processed by the model, and will be
        removed while decoding.

        Args:
            special_tokens: List[Union[str, AddedToken]]:
                A list of special tokens to add to the vocabulary. Each token can either be
                a string, or an instance of AddedToken

        Returns:
            The number of tokens that were added to the vocabulary
        """
        return self._tokenizer.add_special_tokens(special_tokens)
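
    # Usage sketch (illustrative): extend the vocabulary. Regular tokens go through
    # the model as usual; special tokens are never split and are stripped when
    # decoding with `skip_special_tokens=True`.
    #
    #     wrapped.add_tokens(["neologism", AddedToken("multi word", single_word=False)])
    #     wrapped.add_special_tokens(["[CUSTOM]"])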

    def normalize(self, sequence: str) -> str:
        """Normalize the given sequence

        Args:
            sequence: str:
                The sequence to normalize

        Returns:
            The normalized string
        """
        return self._tokenizer.normalize(sequence)

    def encode(
        self,
        sequence: InputSequence,
        pair: Optional[InputSequence] = None,
        is_pretokenized: bool = False,
        add_special_tokens: bool = True,
    ) -> Encoding:
        """Encode the given sequence and pair. This method can process raw text sequences as well
        as already pre-tokenized sequences.

        Args:
            sequence: InputSequence:
                The sequence we want to encode. This sequence can be either raw text or
                pre-tokenized, according to the `is_pretokenized` argument:

                - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
                - If `is_pretokenized=True`: `InputSequence` is expected to be
                  `Union[List[str], Tuple[str]]`

            pair: (`optional`) InputSequence:
                An optional pair sequence, following the same format as `sequence`
            is_pretokenized: bool:
                Whether the input is already pre-tokenized.
            add_special_tokens: bool:
                Whether to add the special tokens while encoding.

        Returns:
            An Encoding
        """
        if sequence is None:
            raise ValueError("encode: `sequence` can't be `None`")

        return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
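
    # Usage sketch (illustrative): raw text, sentence pairs, and pre-tokenized input.
    #
    #     enc = wrapped.encode("Hello world")
    #     enc = wrapped.encode("A first sentence", "Its pair")
    #     enc = wrapped.encode(["Hello", "world"], is_pretokenized=True)
    #     enc.ids, enc.tokens, enc.offsets    # aligned views of the result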

    def encode_batch(
        self,
        inputs: List[EncodeInput],
        is_pretokenized: bool = False,
        add_special_tokens: bool = True,
    ) -> List[Encoding]:
        """Encode the given inputs. This method accepts raw text sequences as well as already
        pre-tokenized sequences.

        Args:
            inputs: List[EncodeInput]:
                A list of single sequences or pair sequences to encode. Each `EncodeInput` is
                expected to be of the following form:
                `Union[InputSequence, Tuple[InputSequence, InputSequence]]`

                Each `InputSequence` can either be raw text or pre-tokenized,
                according to the `is_pretokenized` argument:

                - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
                - If `is_pretokenized=True`: `InputSequence` is expected to be
                  `Union[List[str], Tuple[str]]`

            is_pretokenized: bool:
                Whether the input is already pre-tokenized.
            add_special_tokens: bool:
                Whether to add the special tokens while encoding.

        Returns:
            A list of Encoding
        """
        if inputs is None:
            raise ValueError("encode_batch: `inputs` can't be `None`")

        return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
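
    # Usage sketch (illustrative): single sentences and pairs can be mixed in one batch.
    #
    #     encodings = wrapped.encode_batch(
    #         ["A single sentence", ("A first sentence", "Its pair")]
    #     )
    #     [e.ids for e in encodings]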

    def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
        """Decode the given list of ids to a string sequence

        Args:
            ids: List[unsigned int]:
                A list of ids to be decoded
            skip_special_tokens: (`optional`) boolean:
                Whether to remove all the special tokens from the output string

        Returns:
            The decoded string
        """
        if ids is None:
            raise ValueError("None input is not valid. Should be a list of integers.")

        return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

    def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
        """Decode the given list of id sequences to a list of string sequences

        Args:
            sequences: List[List[unsigned int]]:
                A list of sequences of ids to be decoded
            skip_special_tokens: (`optional`) boolean:
                Whether to remove all the special tokens from the output strings

        Returns:
            A list of decoded strings
        """
        if sequences is None:
            raise ValueError("None input is not valid. Should be a list of lists of integers.")

        return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
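
    # Usage sketch (illustrative): round-tripping ids back to text.
    #
    #     text = wrapped.decode(enc.ids)                               # one sequence
    #     texts = wrapped.decode_batch([e.ids for e in encodings])     # many at once
    #     wrapped.decode(enc.ids, skip_special_tokens=False)           # keep e.g. "[CLS]"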

    def token_to_id(self, token: str) -> Optional[int]:
        """Convert the given token to its corresponding id

        Args:
            token: str:
                The token to convert

        Returns:
            The corresponding id if it exists, None otherwise
        """
        return self._tokenizer.token_to_id(token)

    def id_to_token(self, id: int) -> Optional[str]:
        """Convert the given token id to its corresponding string

        Args:
            id: int:
                The token id to convert

        Returns:
            The corresponding string if it exists, None otherwise
        """
        return self._tokenizer.id_to_token(id)
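
    # Usage sketch (illustrative): both lookups return None for unknown entries
    # instead of raising.
    #
    #     wrapped.token_to_id("[PAD]")   # e.g. 0, or None if not in the vocabulary
    #     wrapped.id_to_token(0)         # e.g. "[PAD]", or None if out of range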

    def save_model(self, directory: str, prefix: Optional[str] = None):
        """Save the current model to the given directory

        Args:
            directory: str:
                A path to the destination directory
            prefix: (`optional`) str:
                An optional prefix, used to prefix each file name
        """
        return self._tokenizer.model.save(directory, prefix=prefix)

    def save(self, path: str, pretty: bool = True):
        """Save the current Tokenizer at the given path

        Args:
            path: str:
                A path to the destination Tokenizer file
            pretty: bool:
                Whether the saved JSON file should be prettified
        """
        return self._tokenizer.save(path, pretty)
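
    # Usage sketch (illustrative): `save_model` writes only the model files
    # (e.g. vocab/merges), while `save` serializes the whole pipeline to a single
    # JSON file that `Tokenizer.from_file` can reload. Paths are hypothetical.
    #
    #     wrapped.save_model("./out", prefix="my-tokenizer")
    #     wrapped.save("./out/tokenizer.json")
    #     reloaded = Tokenizer.from_file("./out/tokenizer.json")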

    def to_str(self, pretty: bool = False):
        """Get a serialized JSON version of the Tokenizer as a str

        Args:
            pretty: bool:
                Whether the JSON string should be prettified

        Returns:
            str
        """
        return self._tokenizer.to_str(pretty)

    def post_process(
        self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
    ) -> Encoding:
        """Apply all the post-processing steps to the given encodings.

        The various steps are:
            1. Truncate according to the global params (provided to `enable_truncation`)
            2. Apply the PostProcessor
            3. Pad according to the global params (provided to `enable_padding`)

        Args:
            encoding: Encoding:
                The main Encoding to post process
            pair: Optional[Encoding]:
                An optional pair Encoding
            add_special_tokens: bool:
                Whether to add special tokens

        Returns:
            The resulting Encoding
        """
        return self._tokenizer.post_process(encoding, pair, add_special_tokens)

    @property
    def model(self) -> Model:
        return self._tokenizer.model

    @model.setter
    def model(self, model: Model):
        self._tokenizer.model = model

    @property
    def normalizer(self) -> Normalizer:
        return self._tokenizer.normalizer

    @normalizer.setter
    def normalizer(self, normalizer: Normalizer):
        self._tokenizer.normalizer = normalizer

    @property
    def pre_tokenizer(self) -> PreTokenizer:
        return self._tokenizer.pre_tokenizer

    @pre_tokenizer.setter
    def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
        self._tokenizer.pre_tokenizer = pre_tokenizer

    @property
    def post_processor(self) -> PostProcessor:
        return self._tokenizer.post_processor

    @post_processor.setter
    def post_processor(self, post_processor: PostProcessor):
        self._tokenizer.post_processor = post_processor

    @property
    def decoder(self) -> Decoder:
        return self._tokenizer.decoder

    @decoder.setter
    def decoder(self, decoder: Decoder):
        self._tokenizer.decoder = decoder
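

if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module. It builds a tiny
    # WordLevel tokenizer (the vocabulary below is made up for demonstration) and
    # exercises the wrapper end to end.
    from tokenizers.models import WordLevel
    from tokenizers.pre_tokenizers import Whitespace

    vocab = {"[UNK]": 0, "[PAD]": 1, "hello": 2, "world": 3}
    raw = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
    raw.pre_tokenizer = Whitespace()

    wrapped = BaseTokenizer(raw, parameters={"model": "word-level"})
    wrapped.add_special_tokens(["[UNK]", "[PAD]"])  # mark them as special tokens
    wrapped.enable_padding(pad_id=1, pad_token="[PAD]", length=8)

    encoding = wrapped.encode("hello world")
    print(encoding.tokens)                # ['hello', 'world', '[PAD]', ..., '[PAD]']
    print(encoding.ids)                   # [2, 3, 1, 1, 1, 1, 1, 1]
    print(wrapped.decode(encoding.ids))   # 'hello world' (special tokens skipped)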