byte_level_bpe.py

from typing import Dict, Iterator, List, Optional, Tuple, Union

from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str

from .base_tokenizer import BaseTokenizer


class ByteLevelBPETokenizer(BaseTokenizer):
    """ByteLevelBPETokenizer

    Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
        add_prefix_space: bool = False,
        lowercase: bool = False,
        dropout: Optional[float] = None,
        unicode_normalizer: Optional[str] = None,
        continuing_subword_prefix: Optional[str] = None,
        end_of_word_suffix: Optional[str] = None,
        trim_offsets: bool = False,
    ):
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    continuing_subword_prefix=continuing_subword_prefix or "",
                    end_of_word_suffix=end_of_word_suffix or "",
                )
            )
        else:
            tokenizer = Tokenizer(BPE())

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.ByteLevel()
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)

        parameters = {
            "model": "ByteLevelBPE",
            "add_prefix_space": add_prefix_space,
            "lowercase": lowercase,
            "dropout": dropout,
            "unicode_normalizer": unicode_normalizer,
            "continuing_subword_prefix": continuing_subword_prefix,
            "end_of_word_suffix": end_of_word_suffix,
            "trim_offsets": trim_offsets,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return ByteLevelBPETokenizer(vocab, merges, **kwargs)
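
    # Usage sketch (not part of the original module; filenames are hypothetical):
    # a previously trained byte-level BPE can be reloaded from its two serialized
    # files, which BPE.read_file parses into the in-memory vocab and merges
    # expected by __init__ above:
    #
    #   tokenizer = ByteLevelBPETokenizer.from_file("vocab.json", "merges.txt")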

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: List[Union[str, AddedToken]] = [],
    ):
        """Train the model using the given files"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
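
    # Usage sketch (assumption; file paths are hypothetical): training from plain
    # text files on disk. Seeding the trainer with ByteLevel.alphabet() (all 256
    # byte symbols) guarantees any input can be encoded without unknown tokens:
    #
    #   tok = ByteLevelBPETokenizer()
    #   tok.train(files=["corpus_a.txt", "corpus_b.txt"], vocab_size=30000,
    #             special_tokens=["<s>", "<pad>", "</s>"])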

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: List[Union[str, AddedToken]] = [],
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator"""

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
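

# Minimal end-to-end sketch (assumption, not part of the original module):
# trains from an in-memory iterator and round-trips one string. Because this
# file uses a relative import, run it through the package, e.g.
# `python -m tokenizers.implementations.byte_level_bpe`.
if __name__ == "__main__":
    corpus = [
        "Byte-level BPE never produces unknown tokens.",
        "It operates on UTF-8 bytes rather than unicode code points.",
    ]

    tok = ByteLevelBPETokenizer()
    tok.train_from_iterator(corpus, vocab_size=500, special_tokens=["<pad>"])

    enc = tok.encode("unknown tokens")
    print(enc.tokens)           # byte-level subword pieces
    print(tok.decode(enc.ids))  # should print the original string, "unknown tokens"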