# Generated content DO NOT EDIT
class PreTokenizer:
    """
    Base class for all pre-tokenizers

    This class is not supposed to be instantiated directly. Instead, any implementation of a
    PreTokenizer will return an instance of this class when instantiated.
    """

    def pre_tokenize(self, pretok):
        """
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place

        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
        keep track of the pre-tokenization, and to leverage the capabilities of the
        :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
        the pre-tokenization of a raw string, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`

        Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
                The pre-tokenized string on which to apply this
                :class:`~tokenizers.pre_tokenizers.PreTokenizer`
        """
        pass
    def pre_tokenize_str(self, sequence):
        """
        Pre-tokenize the given string

        This method provides a way to visualize the effect of a
        :class:`~tokenizers.pre_tokenizers.PreTokenizer`, but it does not keep track of
        the alignment, nor does it provide all the capabilities of the
        :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
        :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`

        Args:
            sequence (:obj:`str`):
                A string to pre-tokenize

        Returns:
            :obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
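
        Example (a minimal sketch using a concrete subclass, since this base class is
        not instantiated directly; the offsets shown are indicative)::

            from tokenizers.pre_tokenizers import Whitespace

            Whitespace().pre_tokenize_str("Hello there!")
            # [("Hello", (0, 5)), ("there", (6, 11)), ("!", (11, 12))]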
  36. """
  37. pass

class BertPreTokenizer(PreTokenizer):
    """
    BertPreTokenizer

    This pre-tokenizer splits tokens on spaces, and also on punctuation.
    Each occurrence of a punctuation character will be treated separately.
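
    Example (a minimal sketch; the offsets shown are indicative)::

        from tokenizers.pre_tokenizers import BertPreTokenizer

        BertPreTokenizer().pre_tokenize_str("Hey friend!")
        # [("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11))]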
  43. """
  44. def __init__(self):
  45. pass
  46. def pre_tokenize(self, pretok):
  47. """
  48. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  49. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  50. keep track of the pre-tokenization, and leverage the capabilities of the
  51. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  52. the pre-tokenization of a raw string, you can use
  53. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  54. Args:
  55. pretok (:class:`~tokenizers.PreTokenizedString):
  56. The pre-tokenized string on which to apply this
  57. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  58. """
  59. pass
  60. def pre_tokenize_str(self, sequence):
  61. """
  62. Pre tokenize the given string
  63. This method provides a way to visualize the effect of a
  64. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  65. alignment, nor does it provide all the capabilities of the
  66. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  67. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  68. Args:
  69. sequence (:obj:`str`):
  70. A string to pre-tokeize
  71. Returns:
  72. :obj:`List[Tuple[str, Offsets]]`:
  73. A list of tuple with the pre-tokenized parts and their offsets
  74. """
  75. pass

class ByteLevel(PreTokenizer):
    """
    ByteLevel PreTokenizer

    This pre-tokenizer takes care of replacing all bytes of the given string
    with a corresponding representation, as well as splitting into words.

    Args:
        add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to add a space to the first word if there isn't already one. This
            lets us treat `hello` exactly like `say hello`.
        use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Set this to :obj:`False` to prevent this `pre_tokenizer` from using
            the GPT2-specific regexp for splitting on whitespace.
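
    Example (a minimal sketch; the "Ġ" character standing in for a space, and the
    offsets shown, are indicative)::

        from tokenizers.pre_tokenizers import ByteLevel

        ByteLevel().pre_tokenize_str("Hello world")
        # [("ĠHello", (0, 5)), ("Ġworld", (5, 11))]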
  88. """
  89. def __init__(self, add_prefix_space=True, use_regex=True):
  90. pass
  91. @staticmethod
  92. def alphabet():
  93. """
  94. Returns the alphabet used by this PreTokenizer.
  95. Since the ByteLevel works as its name suggests, at the byte level, it
  96. encodes each byte value to a unique visible character. This means that there is a
  97. total of 256 different characters composing this alphabet.
  98. Returns:
  99. :obj:`List[str]`: A list of characters that compose the alphabet
  100. """
  101. pass
  102. def pre_tokenize(self, pretok):
  103. """
  104. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  105. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  106. keep track of the pre-tokenization, and leverage the capabilities of the
  107. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  108. the pre-tokenization of a raw string, you can use
  109. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  110. Args:
  111. pretok (:class:`~tokenizers.PreTokenizedString):
  112. The pre-tokenized string on which to apply this
  113. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  114. """
  115. pass
  116. def pre_tokenize_str(self, sequence):
  117. """
  118. Pre tokenize the given string
  119. This method provides a way to visualize the effect of a
  120. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  121. alignment, nor does it provide all the capabilities of the
  122. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  123. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  124. Args:
  125. sequence (:obj:`str`):
  126. A string to pre-tokeize
  127. Returns:
  128. :obj:`List[Tuple[str, Offsets]]`:
  129. A list of tuple with the pre-tokenized parts and their offsets
  130. """
  131. pass

class CharDelimiterSplit(PreTokenizer):
    """
    This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`

    Args:
        delimiter (:obj:`str`):
            The delimiter char that will be used to split the input
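
    Example (a minimal sketch; the offsets shown are indicative)::

        from tokenizers.pre_tokenizers import CharDelimiterSplit

        CharDelimiterSplit("-").pre_tokenize_str("state-of-the-art")
        # [("state", (0, 5)), ("of", (6, 8)), ("the", (9, 12)), ("art", (13, 16))]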
  138. """
  139. def pre_tokenize(self, pretok):
  140. """
  141. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  142. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  143. keep track of the pre-tokenization, and leverage the capabilities of the
  144. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  145. the pre-tokenization of a raw string, you can use
  146. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  147. Args:
  148. pretok (:class:`~tokenizers.PreTokenizedString):
  149. The pre-tokenized string on which to apply this
  150. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  151. """
  152. pass
  153. def pre_tokenize_str(self, sequence):
  154. """
  155. Pre tokenize the given string
  156. This method provides a way to visualize the effect of a
  157. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  158. alignment, nor does it provide all the capabilities of the
  159. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  160. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  161. Args:
  162. sequence (:obj:`str`):
  163. A string to pre-tokeize
  164. Returns:
  165. :obj:`List[Tuple[str, Offsets]]`:
  166. A list of tuple with the pre-tokenized parts and their offsets
  167. """
  168. pass

class Digits(PreTokenizer):
    """
    This pre-tokenizer splits off digits into separate tokens

    Args:
        individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If set to True, each digit is separated as follows::

                "Call 123 please" -> "Call ", "1", "2", "3", " please"

            If set to False, digits are grouped as follows::

                "Call 123 please" -> "Call ", "123", " please"
  178. """
  179. def __init__(self, individual_digits=False):
  180. pass
  181. def pre_tokenize(self, pretok):
  182. """
  183. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  184. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  185. keep track of the pre-tokenization, and leverage the capabilities of the
  186. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  187. the pre-tokenization of a raw string, you can use
  188. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  189. Args:
  190. pretok (:class:`~tokenizers.PreTokenizedString):
  191. The pre-tokenized string on which to apply this
  192. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  193. """
  194. pass
  195. def pre_tokenize_str(self, sequence):
  196. """
  197. Pre tokenize the given string
  198. This method provides a way to visualize the effect of a
  199. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  200. alignment, nor does it provide all the capabilities of the
  201. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  202. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  203. Args:
  204. sequence (:obj:`str`):
  205. A string to pre-tokeize
  206. Returns:
  207. :obj:`List[Tuple[str, Offsets]]`:
  208. A list of tuple with the pre-tokenized parts and their offsets
  209. """
  210. pass

class Metaspace(PreTokenizer):
    """
    Metaspace pre-tokenizer

    This pre-tokenizer replaces any whitespace with the provided replacement character.
    It then tries to split on these spaces.

    Args:
        replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
            The replacement character. Must be exactly one character. By default we
            use the `▁` (U+2581) meta symbol (same as in SentencePiece).
        prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
            Whether to add a space to the first word if there isn't already one. This
            lets us treat `hello` exactly like `say hello`.
            Choices: "always", "never", "first". "first" means the space is only added to
            the first token (relevant when special tokens or other pre-tokenizers are used).
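
    Example (a minimal sketch; the offsets shown are indicative)::

        from tokenizers.pre_tokenizers import Metaspace

        Metaspace().pre_tokenize_str("Hello world")
        # [("▁Hello", (0, 5)), ("▁world", (5, 11))]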
  225. """
  226. def __init__(self, replacement="_", prepend_scheme="always", split=True):
  227. pass
  228. def pre_tokenize(self, pretok):
  229. """
  230. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  231. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  232. keep track of the pre-tokenization, and leverage the capabilities of the
  233. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  234. the pre-tokenization of a raw string, you can use
  235. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  236. Args:
  237. pretok (:class:`~tokenizers.PreTokenizedString):
  238. The pre-tokenized string on which to apply this
  239. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  240. """
  241. pass
  242. def pre_tokenize_str(self, sequence):
  243. """
  244. Pre tokenize the given string
  245. This method provides a way to visualize the effect of a
  246. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  247. alignment, nor does it provide all the capabilities of the
  248. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  249. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  250. Args:
  251. sequence (:obj:`str`):
  252. A string to pre-tokeize
  253. Returns:
  254. :obj:`List[Tuple[str, Offsets]]`:
  255. A list of tuple with the pre-tokenized parts and their offsets
  256. """
  257. pass

class Punctuation(PreTokenizer):
    """
    This pre-tokenizer simply splits on punctuation, treating each punctuation
    character individually.

    Args:
        behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
            The behavior to use when splitting.
            Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
            "contiguous"
  266. """
  267. def __init__(self, behavior="isolated"):
  268. pass
  269. def pre_tokenize(self, pretok):
  270. """
  271. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  272. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  273. keep track of the pre-tokenization, and leverage the capabilities of the
  274. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  275. the pre-tokenization of a raw string, you can use
  276. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  277. Args:
  278. pretok (:class:`~tokenizers.PreTokenizedString):
  279. The pre-tokenized string on which to apply this
  280. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  281. """
  282. pass
  283. def pre_tokenize_str(self, sequence):
  284. """
  285. Pre tokenize the given string
  286. This method provides a way to visualize the effect of a
  287. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  288. alignment, nor does it provide all the capabilities of the
  289. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  290. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  291. Args:
  292. sequence (:obj:`str`):
  293. A string to pre-tokeize
  294. Returns:
  295. :obj:`List[Tuple[str, Offsets]]`:
  296. A list of tuple with the pre-tokenized parts and their offsets
  297. """
  298. pass

class Sequence(PreTokenizer):
    """
    This pre-tokenizer composes other pre-tokenizers and applies them in sequence
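
    Example (a minimal sketch composing two pre-tokenizers; the offsets shown
    are indicative)::

        from tokenizers.pre_tokenizers import Digits, Sequence, Whitespace

        pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=True)])
        pre_tokenizer.pre_tokenize_str("Call 123")
        # [("Call", (0, 4)), ("1", (5, 6)), ("2", (6, 7)), ("3", (7, 8))]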
  302. """
  303. def __init__(self, pretokenizers):
  304. pass
  305. def pre_tokenize(self, pretok):
  306. """
  307. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  308. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  309. keep track of the pre-tokenization, and leverage the capabilities of the
  310. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  311. the pre-tokenization of a raw string, you can use
  312. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  313. Args:
  314. pretok (:class:`~tokenizers.PreTokenizedString):
  315. The pre-tokenized string on which to apply this
  316. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  317. """
  318. pass
  319. def pre_tokenize_str(self, sequence):
  320. """
  321. Pre tokenize the given string
  322. This method provides a way to visualize the effect of a
  323. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  324. alignment, nor does it provide all the capabilities of the
  325. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  326. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  327. Args:
  328. sequence (:obj:`str`):
  329. A string to pre-tokeize
  330. Returns:
  331. :obj:`List[Tuple[str, Offsets]]`:
  332. A list of tuple with the pre-tokenized parts and their offsets
  333. """
  334. pass

class Split(PreTokenizer):
    """
    Split PreTokenizer

    This versatile pre-tokenizer splits using the provided pattern and
    according to the provided behavior. The pattern can be inverted by
    making use of the invert flag.

    Args:
        pattern (:obj:`str` or :class:`~tokenizers.Regex`):
            A pattern used to split the string. Usually a string or a regex built with
            `tokenizers.Regex`. If you want to use a regex pattern, it has to be wrapped
            in a `tokenizers.Regex`; otherwise we consider it a string pattern. For
            example, `pattern="|"` means you want to split on `|` (imagine a csv file,
            for example), while `pattern=tokenizers.Regex("1|2")` means you split on
            either '1' or '2'.
        behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
            The behavior to use when splitting.
            Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
            "contiguous"
        invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to invert the pattern.
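
    Example (a minimal sketch; the offsets shown are indicative)::

        from tokenizers import Regex
        from tokenizers.pre_tokenizers import Split

        # String pattern: split on the literal "|"
        Split("|", behavior="removed").pre_tokenize_str("a|b|c")
        # [("a", (0, 1)), ("b", (2, 3)), ("c", (4, 5))]

        # Regex pattern: split on any digit
        Split(Regex(r"\d"), behavior="removed").pre_tokenize_str("a1b2c")
        # [("a", (0, 1)), ("b", (2, 3)), ("c", (4, 5))]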
  354. """
  355. def __init__(self, pattern, behavior, invert=False):
  356. pass
  357. def pre_tokenize(self, pretok):
  358. """
  359. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  360. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  361. keep track of the pre-tokenization, and leverage the capabilities of the
  362. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  363. the pre-tokenization of a raw string, you can use
  364. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  365. Args:
  366. pretok (:class:`~tokenizers.PreTokenizedString):
  367. The pre-tokenized string on which to apply this
  368. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  369. """
  370. pass
  371. def pre_tokenize_str(self, sequence):
  372. """
  373. Pre tokenize the given string
  374. This method provides a way to visualize the effect of a
  375. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  376. alignment, nor does it provide all the capabilities of the
  377. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  378. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  379. Args:
  380. sequence (:obj:`str`):
  381. A string to pre-tokeize
  382. Returns:
  383. :obj:`List[Tuple[str, Offsets]]`:
  384. A list of tuple with the pre-tokenized parts and their offsets
  385. """
  386. pass

class UnicodeScripts(PreTokenizer):
    """
    This pre-tokenizer splits on characters that belong to different language families
    It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
    In practice, Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
    This mimics the SentencePiece Unigram implementation.
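
    Example (a minimal sketch; the exact grouping and offsets shown are indicative)::

        from tokenizers.pre_tokenizers import UnicodeScripts

        UnicodeScripts().pre_tokenize_str("Hello 你好")
        # e.g. [("Hello ", (0, 6)), ("你好", (6, 8))]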
  393. """
  394. def __init__(self):
  395. pass
  396. def pre_tokenize(self, pretok):
  397. """
  398. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  399. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  400. keep track of the pre-tokenization, and leverage the capabilities of the
  401. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  402. the pre-tokenization of a raw string, you can use
  403. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  404. Args:
  405. pretok (:class:`~tokenizers.PreTokenizedString):
  406. The pre-tokenized string on which to apply this
  407. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  408. """
  409. pass
  410. def pre_tokenize_str(self, sequence):
  411. """
  412. Pre tokenize the given string
  413. This method provides a way to visualize the effect of a
  414. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  415. alignment, nor does it provide all the capabilities of the
  416. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  417. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  418. Args:
  419. sequence (:obj:`str`):
  420. A string to pre-tokeize
  421. Returns:
  422. :obj:`List[Tuple[str, Offsets]]`:
  423. A list of tuple with the pre-tokenized parts and their offsets
  424. """
  425. pass

class Whitespace(PreTokenizer):
    """
    This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
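
    Example (a minimal sketch; the offsets shown are indicative)::

        from tokenizers.pre_tokenizers import Whitespace

        Whitespace().pre_tokenize_str("What's up?")
        # [("What", (0, 4)), ("'", (4, 5)), ("s", (5, 6)), ("up", (7, 9)), ("?", (9, 10))]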
  429. """
  430. def __init__(self):
  431. pass
  432. def pre_tokenize(self, pretok):
  433. """
  434. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  435. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  436. keep track of the pre-tokenization, and leverage the capabilities of the
  437. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  438. the pre-tokenization of a raw string, you can use
  439. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  440. Args:
  441. pretok (:class:`~tokenizers.PreTokenizedString):
  442. The pre-tokenized string on which to apply this
  443. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  444. """
  445. pass
  446. def pre_tokenize_str(self, sequence):
  447. """
  448. Pre tokenize the given string
  449. This method provides a way to visualize the effect of a
  450. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  451. alignment, nor does it provide all the capabilities of the
  452. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  453. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  454. Args:
  455. sequence (:obj:`str`):
  456. A string to pre-tokeize
  457. Returns:
  458. :obj:`List[Tuple[str, Offsets]]`:
  459. A list of tuple with the pre-tokenized parts and their offsets
  460. """
  461. pass

class WhitespaceSplit(PreTokenizer):
    """
    This pre-tokenizer simply splits on whitespace. Works like `.split()`
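
    Example (a minimal sketch; unlike :class:`~tokenizers.pre_tokenizers.Whitespace`,
    punctuation stays attached to the surrounding word; offsets shown are indicative)::

        from tokenizers.pre_tokenizers import WhitespaceSplit

        WhitespaceSplit().pre_tokenize_str("What's up?")
        # [("What's", (0, 6)), ("up?", (7, 10))]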
  465. """
  466. def __init__(self):
  467. pass
  468. def pre_tokenize(self, pretok):
  469. """
  470. Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
  471. This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
  472. keep track of the pre-tokenization, and leverage the capabilities of the
  473. :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
  474. the pre-tokenization of a raw string, you can use
  475. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
  476. Args:
  477. pretok (:class:`~tokenizers.PreTokenizedString):
  478. The pre-tokenized string on which to apply this
  479. :class:`~tokenizers.pre_tokenizers.PreTokenizer`
  480. """
  481. pass
  482. def pre_tokenize_str(self, sequence):
  483. """
  484. Pre tokenize the given string
  485. This method provides a way to visualize the effect of a
  486. :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
  487. alignment, nor does it provide all the capabilities of the
  488. :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
  489. :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
  490. Args:
  491. sequence (:obj:`str`):
  492. A string to pre-tokeize
  493. Returns:
  494. :obj:`List[Tuple[str, Offsets]]`:
  495. A list of tuple with the pre-tokenized parts and their offsets
  496. """
  497. pass