  1. # Generated content DO NOT EDIT
  2. class Decoder:
  3. """
  4. Base class for all decoders
  5. This class is not supposed to be instantiated directly. Instead, any implementation of
  6. a Decoder will return an instance of this class when instantiated.
  7. """
  8. def decode(self, tokens):
  9. """
  10. Decode the given list of tokens to a final string
  11. Args:
  12. tokens (:obj:`List[str]`):
  13. The list of tokens to decode
  14. Returns:
  15. :obj:`str`: The decoded string
  16. """
  17. pass
  18. class BPEDecoder(Decoder):
  19. """
  20. BPEDecoder Decoder
  21. Args:
  22. suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
  23. The suffix that was used to caracterize an end-of-word. This suffix will
  24. be replaced by whitespaces during the decoding
  25. """
  26. def __init__(self, suffix="</w>"):
  27. pass
  28. def decode(self, tokens):
  29. """
  30. Decode the given list of tokens to a final string
  31. Args:
  32. tokens (:obj:`List[str]`):
  33. The list of tokens to decode
  34. Returns:
  35. :obj:`str`: The decoded string
  36. """
  37. pass
  38. class ByteFallback(Decoder):
  39. """
  40. ByteFallback Decoder
  41. ByteFallback is a simple trick which converts tokens looking like `<0x61>`
  42. to pure bytes, and attempts to make them into a string. If the tokens
  43. cannot be decoded you will get � instead for each inconvertable byte token
  44. """
  45. def __init__(self):
  46. pass
  47. def decode(self, tokens):
  48. """
  49. Decode the given list of tokens to a final string
  50. Args:
  51. tokens (:obj:`List[str]`):
  52. The list of tokens to decode
  53. Returns:
  54. :obj:`str`: The decoded string
  55. """
  56. pass
  57. class ByteLevel(Decoder):
  58. """
  59. ByteLevel Decoder
  60. This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
  61. :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
  62. """
  63. def __init__(self):
  64. pass
  65. def decode(self, tokens):
  66. """
  67. Decode the given list of tokens to a final string
  68. Args:
  69. tokens (:obj:`List[str]`):
  70. The list of tokens to decode
  71. Returns:
  72. :obj:`str`: The decoded string
  73. """
  74. pass
  75. class CTC(Decoder):
  76. """
  77. CTC Decoder
  78. Args:
  79. pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
  80. The pad token used by CTC to delimit a new token.
  81. word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
  82. The word delimiter token. It will be replaced by a <space>
  83. cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
  84. Whether to cleanup some tokenization artifacts.
  85. Mainly spaces before punctuation, and some abbreviated english forms.
  86. """
  87. def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
  88. pass
  89. def decode(self, tokens):
  90. """
  91. Decode the given list of tokens to a final string
  92. Args:
  93. tokens (:obj:`List[str]`):
  94. The list of tokens to decode
  95. Returns:
  96. :obj:`str`: The decoded string
  97. """
  98. pass
  99. class Fuse(Decoder):
  100. """
  101. Fuse Decoder
  102. Fuse simply fuses every token into a single string.
  103. This is the last step of decoding, this decoder exists only if
  104. there is need to add other decoders *after* the fusion
  105. """
  106. def __init__(self):
  107. pass
  108. def decode(self, tokens):
  109. """
  110. Decode the given list of tokens to a final string
  111. Args:
  112. tokens (:obj:`List[str]`):
  113. The list of tokens to decode
  114. Returns:
  115. :obj:`str`: The decoded string
  116. """
  117. pass
class Metaspace(Decoder):
    """
    Metaspace Decoder

    Args:
        replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
            The replacement character. Must be exactly one character. By default we
            use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
        prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
            Whether to add a space to the first word if there isn't already one. This
            lets us treat `hello` exactly like `say hello`.
            Choices: "always", "never", "first". First means the space is only added on the first
            token (relevant when special tokens are used or other pre_tokenizer are used).
        split (:obj:`bool`, `optional`, defaults to :obj:`True`):
            NOTE(review): undocumented in the generated stub; presumably controls
            whether decoding splits on the replacement character — confirm against
            the underlying implementation.
    """

    def __init__(self, replacement="▁", prepend_scheme="always", split=True):
        pass

    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass
  143. class Replace(Decoder):
  144. """
  145. Replace Decoder
  146. This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
  147. :class:`~tokenizers.pre_tokenizers.PreTokenizer`.
  148. """
  149. def __init__(self, pattern, content):
  150. pass
  151. def decode(self, tokens):
  152. """
  153. Decode the given list of tokens to a final string
  154. Args:
  155. tokens (:obj:`List[str]`):
  156. The list of tokens to decode
  157. Returns:
  158. :obj:`str`: The decoded string
  159. """
  160. pass
  161. class Sequence(Decoder):
  162. """
  163. Sequence Decoder
  164. Args:
  165. decoders (:obj:`List[Decoder]`)
  166. The decoders that need to be chained
  167. """
  168. def __init__(self, decoders):
  169. pass
  170. def decode(self, tokens):
  171. """
  172. Decode the given list of tokens to a final string
  173. Args:
  174. tokens (:obj:`List[str]`):
  175. The list of tokens to decode
  176. Returns:
  177. :obj:`str`: The decoded string
  178. """
  179. pass
class Strip(Decoder):
    """
    Strip Decoder

    Strips n left characters of each token, or n right characters of each token

    Args:
        content (:obj:`str`):
            NOTE(review): undocumented in the generated stub; presumably the
            character being stripped — confirm against the underlying
            implementation.
        left (:obj:`int`, `optional`, defaults to :obj:`0`):
            The number of characters to strip from the left of each token.
        right (:obj:`int`, `optional`, defaults to :obj:`0`):
            The number of characters to strip from the right of each token.
    """

    def __init__(self, content, left=0, right=0):
        pass

    def decode(self, tokens):
        """
        Decode the given list of tokens to a final string

        Args:
            tokens (:obj:`List[str]`):
                The list of tokens to decode

        Returns:
            :obj:`str`: The decoded string
        """
        pass
  197. class WordPiece(Decoder):
  198. """
  199. WordPiece Decoder
  200. Args:
  201. prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
  202. The prefix to use for subwords that are not a beginning-of-word
  203. cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
  204. Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
  205. and some abbreviated english forms.
  206. """
  207. def __init__(self, prefix="##", cleanup=True):
  208. pass
  209. def decode(self, tokens):
  210. """
  211. Decode the given list of tokens to a final string
  212. Args:
  213. tokens (:obj:`List[str]`):
  214. The list of tokens to decode
  215. Returns:
  216. :obj:`str`: The decoded string
  217. """
  218. pass