tokenization_utils_base.py

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (hosts all the
user-facing encoding methods), SpecialTokensMixin (hosts the special tokens logic) and BatchEncoding (wraps the
dictionary of outputs with special methods for the fast tokenizers).
"""

import copy
import json
import os
import re
import warnings
from collections import UserDict
from collections.abc import Mapping, Sized
from contextlib import contextmanager
from dataclasses import dataclass
from inspect import isfunction
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union

import numpy as np
from packaging import version

from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
    ExplicitEnum,
    PaddingStrategy,
    PushToHubMixin,
    TensorType,
    add_end_docstrings,
    add_model_info_to_auto_map,
    add_model_info_to_custom_pipelines,
    cached_file,
    copy_func,
    download_url,
    extract_commit_hash,
    get_json_schema,
    is_flax_available,
    is_jax_tensor,
    is_mlx_available,
    is_numpy_array,
    is_offline_mode,
    is_protobuf_available,
    is_remote_url,
    is_tf_available,
    is_tf_tensor,
    is_tokenizers_available,
    is_torch_available,
    is_torch_device,
    is_torch_tensor,
    logging,
    requires_backends,
    to_py_obj,
)
from .utils.chat_template_utils import _compile_jinja_template, _render_with_assistant_indices
from .utils.import_utils import PROTOBUF_IMPORT_ERROR


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
    if is_flax_available():
        import jax.numpy as jnp  # noqa: F401


def import_protobuf_decode_error(error_message=""):
    if is_protobuf_available():
        from google.protobuf.message import DecodeError

        return DecodeError
    else:
        raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))


if is_tokenizers_available():
    from tokenizers import AddedToken
    from tokenizers import Encoding as EncodingFast
else:

    @dataclass(frozen=False, eq=True)
    class AddedToken:
        """
        AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
        way it should behave.

        The `normalized` option defaults to `not special` if it is not specified, similarly to the definition in
        `tokenizers`.
        """

        def __init__(
            self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None
        ):
            self.content = content
            self.single_word = single_word
            self.lstrip = lstrip
            self.rstrip = rstrip
            self.special = special
            self.normalized = normalized if normalized is not None else not special

        def __getstate__(self):
            return self.__dict__

        def __str__(self):
            return self.content

    @dataclass
    class EncodingFast:
        """This is a dummy class because without the `tokenizers` library we don't have these objects anyway."""

        pass


logger = logging.get_logger(__name__)

VERY_LARGE_INTEGER = int(1e30)  # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20)  # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER

# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]

# Define type aliases for text-related non-text modalities
AudioInput = Union["np.ndarray", "torch.Tensor", List["np.ndarray"], List["torch.Tensor"]]

# Slow tokenizers used to be saved in three separate files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"

# Fast tokenizers (provided by HuggingFace's tokenizers library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"
_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json")


class TruncationStrategy(ExplicitEnum):
    """
    Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion
    in an IDE.
    """

    ONLY_FIRST = "only_first"
    ONLY_SECOND = "only_second"
    LONGEST_FIRST = "longest_first"
    DO_NOT_TRUNCATE = "do_not_truncate"


class CharSpan(NamedTuple):
    """
    Character span in the original string.

    Args:
        start (`int`): Index of the first character in the original string.
        end (`int`): Index of the character following the last character in the original string.
    """

    start: int
    end: int


class TokenSpan(NamedTuple):
    """
    Token span in an encoded string (list of tokens).

    Args:
        start (`int`): Index of the first token in the span.
        end (`int`): Index of the token following the last token in the span.
    """

    start: int
    end: int


class BatchEncoding(UserDict):
    """
    Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`],
    [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
    [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc.).

    This class is derived from a Python dictionary and can be used as a dictionary. In addition, this class exposes
    utility methods to map from word/character space to token space.

    Args:
        data (`dict`, *optional*):
            Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods
            ('input_ids', 'attention_mask', etc.).
        encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*):
            If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character
            space to token space, the `tokenizers.Encoding` instance or list of instances (for batches) holds this
            information.
        tensor_type (`Union[None, str, TensorType]`, *optional*):
            You can give a tensor_type here to convert the lists of integers to PyTorch/TensorFlow/Numpy tensors at
            initialization.
        prepend_batch_axis (`bool`, *optional*, defaults to `False`):
            Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). Note that this
            parameter only has an effect if the parameter `tensor_type` is set, *otherwise it has no effect*.
        n_sequences (`Optional[int]`, *optional*):
            The number of sequences used to generate each sample from the batch encoded in this [`BatchEncoding`].
    """

    def __init__(
        self,
        data: Optional[Dict[str, Any]] = None,
        encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
        tensor_type: Union[None, str, TensorType] = None,
        prepend_batch_axis: bool = False,
        n_sequences: Optional[int] = None,
    ):
        super().__init__(data)
        if isinstance(encoding, EncodingFast):
            encoding = [encoding]
        self._encodings = encoding
        if n_sequences is None and encoding is not None and len(encoding):
            n_sequences = encoding[0].n_sequences
        self._n_sequences = n_sequences
        self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)

    @property
    def n_sequences(self) -> Optional[int]:
        """
        `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
        [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
        sentences).
        """
        return self._n_sequences

    @property
    def is_fast(self) -> bool:
        """
        `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
        or not.
        """
        return self._encodings is not None

    def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
        """
        If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
        etc.).

        If the key is an integer, gets the `tokenizers.Encoding` for batch item with index `key`.

        If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.)
        restricted to that slice.
        """
        if isinstance(item, str):
            return self.data[item]
        elif self._encodings is not None:
            return self._encodings[item]
        elif isinstance(item, slice):
            return {key: self.data[key][item] for key in self.data.keys()}
        else:
            raise KeyError(
                "Invalid key. Only three types of key are available: "
                "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting."
            )
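
    # Illustrative sketch added by the editor (not part of the upstream file), showing how the three key types above
    # behave. Assumes `enc` was produced by a hypothetical tokenizer call such as `enc = tokenizer("hello world")`:
    #
    #     enc["input_ids"]   # str key   -> the value stored under 'input_ids'
    #     enc[0]             # int key   -> the backend `tokenizers.Encoding` of batch item 0 (fast tokenizers only)
    #     enc[:2]            # slice key -> dict of values restricted to the slice (only when no fast encodings are attached)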

    def __getattr__(self, item: str):
        try:
            return self.data[item]
        except KeyError:
            raise AttributeError

    def __getstate__(self):
        return {"data": self.data, "encodings": self._encodings}

    def __setstate__(self, state):
        if "data" in state:
            self.data = state["data"]
        if "encodings" in state:
            self._encodings = state["encodings"]

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    # After this point:
    # Extended properties and methods only available for fast (Rust-based) tokenizers
    # provided by HuggingFace's tokenizers library.

    @property
    def encodings(self) -> Optional[List[EncodingFast]]:
        """
        `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns `None`
        if the input was tokenized through Python (i.e., not a fast) tokenizer.
        """
        return self._encodings

    def tokens(self, batch_index: int = 0) -> List[str]:
        """
        Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion
        to integer indices) at a given batch index (only works for the output of a fast tokenizer).

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[str]`: The list of tokens at that index.
        """
        if not self._encodings:
            raise ValueError(
                "tokens() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        return self._encodings[batch_index].tokens

    def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
        """
        Return a list mapping the tokens to the id of their original sentences:

            - `None` for special tokens added around or between sequences,
            - `0` for tokens corresponding to words in the first sequence,
            - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
              encoded.

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
            by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
            sequence.
        """
        if not self._encodings:
            raise ValueError(
                "sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        return self._encodings[batch_index].sequence_ids

    def words(self, batch_index: int = 0) -> List[Optional[int]]:
        """
        Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
            tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
            (several tokens will be mapped to the same word index if they are parts of that word).
        """
        if not self._encodings:
            raise ValueError(
                "words() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        warnings.warn(
            "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
            "but more self-explanatory `BatchEncoding.word_ids()` property.",
            FutureWarning,
        )
        return self.word_ids(batch_index)

    def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
        """
        Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
            tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
            (several tokens will be mapped to the same word index if they are parts of that word).
        """
        if not self._encodings:
            raise ValueError(
                "word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        return self._encodings[batch_index].word_ids
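
    # Illustrative sketch added by the editor (not part of the upstream file), assuming `tokenizer` is a hypothetical
    # fast tokenizer that wraps inputs with [CLS]/[SEP]-style special tokens and `enc = tokenizer("hello world")`:
    #
    #     enc.tokens()        # e.g. ['[CLS]', 'hello', 'world', '[SEP]']
    #     enc.word_ids()      # e.g. [None, 0, 1, None]  -- special tokens map to None
    #     enc.sequence_ids()  # e.g. [None, 0, 0, None]  -- all real tokens belong to sequence 0 for a single input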

    def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
        """
        Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
        for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair.

        Can be called as:

        - `self.token_to_sequence(token_index)` if batch size is 1
        - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
        tokenized words.

        Args:
            batch_or_token_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the token in the sequence.
            token_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
                sequence.

        Returns:
            `int`: Index of the sequence in the input (pair of) sequences.
        """
        if not self._encodings:
            raise ValueError("token_to_sequence() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if token_index < 0:
            token_index = self._seq_len + token_index
        return self._encodings[batch_index].token_to_sequence(token_index)

    def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
        """
        Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.

        Can be called as:

        - `self.token_to_word(token_index)` if batch size is 1
        - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
        tokenized words.

        Args:
            batch_or_token_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the token in the sequence.
            token_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
                sequence.

        Returns:
            `int`: Index of the word in the input sequence.
        """
        if not self._encodings:
            raise ValueError("token_to_word() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if token_index < 0:
            token_index = self._seq_len + token_index
        return self._encodings[batch_index].token_to_word(token_index)

    def word_to_tokens(
        self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
    ) -> Optional[TokenSpan]:
        """
        Get the encoded token span corresponding to a word in a sequence of the batch.

        Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with:

        - **start** -- Index of the first token.
        - **end** -- Index of the token following the last token.

        Can be called as:

        - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
        - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal
          to 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.
        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
        tokenized words.

        Args:
            batch_or_word_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the word in the sequence.
            word_index (`int`, *optional*):
                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
                (0 or 1) the provided word index belongs to.

        Returns:
            ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns
            `None` if no tokens correspond to the word. This can happen especially when the token is a special token
            that has been used to format the tokenization. For example when we add a class token at the very beginning
            of the tokenization.
        """
        if not self._encodings:
            raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
        if word_index is not None:
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if word_index < 0:
            word_index = self._seq_len + word_index
        span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
        return TokenSpan(*span) if span is not None else None
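
    # Illustrative sketch added by the editor (not part of the upstream file), continuing the hypothetical
    # `enc = tokenizer("hello world")` example above:
    #
    #     enc.token_to_word(1)      # -> 0, the token after [CLS] comes from the first word
    #     enc.word_to_tokens(0)     # -> e.g. TokenSpan(start=1, end=2) if word 0 is covered by a single token
    #     enc.token_to_sequence(1)  # -> 0 for a single (non-pair) input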
  437. def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
  438. """
  439. Get the character span corresponding to an encoded token in a sequence of the batch.
  440. Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with:
  441. - **start** -- Index of the first character in the original string associated to the token.
  442. - **end** -- Index of the character following the last character in the original string associated to the
  443. token.
  444. Can be called as:
  445. - `self.token_to_chars(token_index)` if batch size is 1
  446. - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1
  447. Args:
  448. batch_or_token_index (`int`):
  449. Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
  450. the token in the sequence.
  451. token_index (`int`, *optional*):
  452. If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in
  453. the sequence.
  454. Returns:
  455. [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token
  456. (e.g. <s>, </s>) doesn't correspond to any chars in the origin string.
  457. """
  458. if not self._encodings:
  459. raise ValueError("token_to_chars() is not available when using Python based tokenizers")
  460. if token_index is not None:
  461. batch_index = batch_or_token_index
  462. else:
  463. batch_index = 0
  464. token_index = batch_or_token_index
  465. span_indices = self._encodings[batch_index].token_to_chars(token_index)
  466. return CharSpan(*span_indices) if span_indices is not None else None
  467. def char_to_token(
  468. self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
  469. ) -> int:
  470. """
  471. Get the index of the token in the encoded output comprising a character in the original string for a sequence
  472. of the batch.
  473. Can be called as:
  474. - `self.char_to_token(char_index)` if batch size is 1
  475. - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1
  476. This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
  477. are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
  478. words.
  479. Args:
  480. batch_or_char_index (`int`):
  481. Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
  482. the word in the sequence
  483. char_index (`int`, *optional*):
  484. If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the
  485. sequence.
  486. sequence_index (`int`, *optional*, defaults to 0):
  487. If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
  488. or 1) the provided character index belongs to.
  489. Returns:
  490. `int`: Index of the token, or None if the char index refers to a whitespace only token and whitespace is
  491. trimmed with `trim_offsets=True`.
  492. """
  493. if not self._encodings:
  494. raise ValueError("char_to_token() is not available when using Python based tokenizers")
  495. if char_index is not None:
  496. batch_index = batch_or_char_index
  497. else:
  498. batch_index = 0
  499. char_index = batch_or_char_index
  500. return self._encodings[batch_index].char_to_token(char_index, sequence_index)
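
    # Illustrative sketch added by the editor (not part of the upstream file): mapping character offsets to tokens,
    # e.g. to align an answer span in extractive QA. Continuing the hypothetical `enc = tokenizer("hello world")`:
    #
    #     enc.char_to_token(6)   # character 'w' -> index of the token that covers it
    #     enc.token_to_chars(2)  # -> e.g. CharSpan(start=6, end=11) for the token covering "world" (indices depend on the model)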

    def word_to_chars(
        self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
    ) -> CharSpan:
        """
        Get the character span in the original string corresponding to a given word in a sequence of the batch.

        Character spans are returned as a `CharSpan` NamedTuple with:

        - start: index of the first character in the original string
        - end: index of the character following the last character in the original string

        Can be called as:

        - `self.word_to_chars(word_index)` if batch size is 1
        - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1

        Args:
            batch_or_word_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the word in the sequence.
            word_index (`int`, *optional*):
                If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
                (0 or 1) the provided word index belongs to.

        Returns:
            `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan
            are NamedTuple with:

                - start: index of the first character associated to the token in the original string
                - end: index of the character following the last character associated to the token in the original
                  string
        """
        if not self._encodings:
            raise ValueError("word_to_chars() is not available when using Python based tokenizers")
        if word_index is not None:
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))

    def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int:
        """
        Get the word in the original string corresponding to a character in the original string of a sequence of the
        batch.

        Can be called as:

        - `self.char_to_word(char_index)` if batch size is 1
        - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.
        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
        tokenized words.

        Args:
            batch_or_char_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the character in the original string.
            char_index (`int`, *optional*):
                If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
                original string.
            sequence_index (`int`, *optional*, defaults to 0):
                If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair
                (0 or 1) the provided character index belongs to.

        Returns:
            `int` or `List[int]`: Index or indices of the associated word(s) in the original string.
        """
        if not self._encodings:
            raise ValueError("char_to_word() is not available when using Python based tokenizers")
        if char_index is not None:
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_word(char_index, sequence_index)
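
    # Illustrative sketch added by the editor (not part of the upstream file), continuing the example above:
    #
    #     enc.char_to_word(6)   # character 'w' -> 1, the second word of the input
    #     enc.word_to_chars(1)  # -> e.g. CharSpan(start=6, end=11), the character span of that word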

    def convert_to_tensors(
        self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        """
        Convert the inner content to tensors.

        Args:
            tensor_type (`str` or [`~utils.TensorType`], *optional*):
                The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`].
                If `None`, no modification is done.
            prepend_batch_axis (`bool`, *optional*, defaults to `False`):
                Whether or not to add the batch dimension during the conversion.
        """
        if tensor_type is None:
            return self

        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            is_tensor = torch.is_tensor

            def as_tensor(value, dtype=None):
                if isinstance(value, list) and isinstance(value[0], np.ndarray):
                    return torch.from_numpy(np.array(value))
                return torch.tensor(value)

        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = is_jax_tensor
        elif tensor_type == TensorType.MLX:
            if not is_mlx_available():
                raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.")
            import mlx.core as mx

            as_tensor = mx.array

            def is_tensor(obj):
                return isinstance(obj, mx.array)
        else:

            def as_tensor(value, dtype=None):
                if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
                    value_lens = [len(val) for val in value]
                    if len(set(value_lens)) > 1 and dtype is None:
                        # we have a ragged list so handle explicitly
                        value = as_tensor([np.asarray(val) for val in value], dtype=object)
                return np.asarray(value, dtype=dtype)

            is_tensor = is_numpy_array

        # Do the tensor conversion in batch
        for key, value in self.items():
            try:
                if prepend_batch_axis:
                    value = [value]

                if not is_tensor(value):
                    tensor = as_tensor(value)

                    # Removing this for now in favor of controlling the shape with `prepend_batch_axis`
                    # # at-least2d
                    # if tensor.ndim > 2:
                    #     tensor = tensor.squeeze(0)
                    # elif tensor.ndim < 2:
                    #     tensor = tensor[None, :]

                    self[key] = tensor
            except Exception as e:
                if key == "overflowing_tokens":
                    raise ValueError(
                        "Unable to create tensor returning overflowing tokens of different lengths. "
                        "Please see if a fast version of this tokenizer is available to have this feature available."
                    ) from e
                raise ValueError(
                    "Unable to create tensor, you should probably activate truncation and/or padding with"
                    " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your"
                    f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is"
                    " expected)."
                ) from e

        return self
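
    # Illustrative sketch added by the editor (not part of the upstream file): converting after the fact instead of
    # passing `return_tensors=...` to the tokenizer call, assuming a padded batch from a hypothetical `tokenizer`:
    #
    #     enc = tokenizer(["hello world", "hi"], padding=True)
    #     enc.convert_to_tensors("pt")  # in-place conversion to PyTorch tensors; other TensorType values select TF/NumPy/JAX/MLX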

    def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding":
        """
        Send all values to device by calling `v.to(device)` (PyTorch only).

        Args:
            device (`str` or `torch.device`): The device to put the tensors on.

        Returns:
            [`BatchEncoding`]: The same instance after modification.
        """
        requires_backends(self, ["torch"])
        import torch

        # This check catches things like APEX blindly calling "to" on all inputs to a module
        # Otherwise it passes the casts down and casts the LongTensor containing the token idxs
        # into a HalfTensor
        if isinstance(device, str) or is_torch_device(device) or isinstance(device, int):
            self.data = {k: v.to(device=device) for k, v in self.data.items() if isinstance(v, torch.Tensor)}
        else:
            logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.")
        return self
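
    # Illustrative sketch added by the editor (not part of the upstream file): move tokenized inputs alongside the
    # model, assuming PyTorch tensors, a CUDA device, and hypothetical `tokenizer`/`model` objects:
    #
    #     enc = tokenizer("hello world", return_tensors="pt").to("cuda")
    #     outputs = model(**enc)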
  670. class SpecialTokensMixin:
  671. """
  672. A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
  673. special tokens. In particular, this class hold the attributes which can be used to directly access these special
  674. tokens in a model-independent manner and allow to set and update the special tokens.
  675. Args:
  676. bos_token (`str` or `tokenizers.AddedToken`, *optional*):
  677. A special token representing the beginning of a sentence.
  678. eos_token (`str` or `tokenizers.AddedToken`, *optional*):
  679. A special token representing the end of a sentence.
  680. unk_token (`str` or `tokenizers.AddedToken`, *optional*):
  681. A special token representing an out-of-vocabulary token.
  682. sep_token (`str` or `tokenizers.AddedToken`, *optional*):
  683. A special token separating two different sentences in the same input (used by BERT for instance).
  684. pad_token (`str` or `tokenizers.AddedToken`, *optional*):
  685. A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
  686. attention mechanisms or loss computation.
  687. cls_token (`str` or `tokenizers.AddedToken`, *optional*):
  688. A special token representing the class of the input (used by BERT for instance).
  689. mask_token (`str` or `tokenizers.AddedToken`, *optional*):
  690. A special token representing a masked token (used by masked-language modeling pretraining objectives, like
  691. BERT).
  692. additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
  693. A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be
  694. skipped when decoding if `skip_special_tokens` is set to `True`.
  695. """
  696. SPECIAL_TOKENS_ATTRIBUTES = [
  697. "bos_token",
  698. "eos_token",
  699. "unk_token",
  700. "sep_token",
  701. "pad_token",
  702. "cls_token",
  703. "mask_token",
  704. "additional_special_tokens",
  705. ]
  706. def __init__(self, verbose=False, **kwargs):
  707. self._bos_token = None
  708. self._eos_token = None
  709. self._unk_token = None
  710. self._sep_token = None
  711. self._pad_token = None
  712. self._cls_token = None
  713. self._mask_token = None
  714. self._pad_token_type_id = 0
  715. self._additional_special_tokens = []
  716. self.verbose = verbose
  717. # We directly set the hidden value to allow initialization with special tokens
  718. # which are not yet in the vocabulary. Necessary for serialization/de-serialization
  719. # TODO clean this up at some point (probably by switching to fast tokenizers)
  720. for key, value in kwargs.items():
  721. if value is None:
  722. continue
  723. if key in self.SPECIAL_TOKENS_ATTRIBUTES:
  724. if key == "additional_special_tokens":
  725. assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
  726. assert all(
  727. isinstance(t, (str, AddedToken)) for t in value
  728. ), "One of the tokens is not a string or an AddedToken"
  729. setattr(self, key, value)
  730. elif isinstance(value, (str, AddedToken)):
  731. setattr(self, key, value)
  732. else:
  733. raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")
  734. def sanitize_special_tokens(self) -> int:
  735. """
  736. The `sanitize_special_tokens` is now deprecated kept for backward compatibility and will be removed in
  737. transformers v5.
  738. """
  739. logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.")
  740. return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
  741. def add_special_tokens(
  742. self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True
  743. ) -> int:
  744. """
  745. Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
  746. special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
  747. current vocabulary).
  748. When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the
  749. model so that its embedding matrix matches the tokenizer.
  750. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
  751. Using `add_special_tokens` will ensure your special tokens can be used in several ways:
  752. - Special tokens can be skipped when decoding using `skip_special_tokens = True`.
  753. - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`.
  754. - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
  755. makes it easy to develop model-agnostic training and fine-tuning scripts.
  756. When possible, special tokens are already registered for provided pretrained models (for instance
  757. [`BertTokenizer`] `cls_token` is already registered to be :obj*'[CLS]'* and XLM's one is also registered to be
  758. `'</s>'`).
  759. Args:
  760. special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
  761. Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
  762. `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
  763. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
  764. assign the index of the `unk_token` to them).
  765. replace_additional_special_tokens (`bool`, *optional*,, defaults to `True`):
  766. If `True`, the existing list of additional special tokens will be replaced by the list provided in
  767. `special_tokens_dict`. Otherwise, `self._additional_special_tokens` is just extended. In the former
  768. case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged
  769. as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the
  770. `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous
  771. `additional_special_tokens` are still added tokens, and will not be split by the model.
  772. Returns:
  773. `int`: Number of tokens added to the vocabulary.
  774. Examples:
  775. ```python
  776. # Let's see how to add a new classification token to GPT-2
  777. tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
  778. model = GPT2Model.from_pretrained("openai-community/gpt2")
  779. special_tokens_dict = {"cls_token": "<CLS>"}
  780. num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
  781. print("We have added", num_added_toks, "tokens")
  782. # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
  783. model.resize_token_embeddings(len(tokenizer))
  784. assert tokenizer.cls_token == "<CLS>"
  785. ```"""
  786. if not special_tokens_dict:
  787. return 0
  788. added_tokens = []
  789. for key, value in special_tokens_dict.items():
  790. assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
  791. if self.verbose:
  792. logger.info(f"Assigning {value} to the {key} key of the tokenizer")
  793. if key == "additional_special_tokens":
  794. assert isinstance(value, (list, tuple)) and all(
  795. isinstance(t, (str, AddedToken)) for t in value
  796. ), f"Tokens {value} for key {key} should all be str or AddedToken instances"
  797. to_add = []
  798. for token in value:
  799. if isinstance(token, str):
  800. # for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this
  801. token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True)
  802. if not replace_additional_special_tokens and str(token) in self.additional_special_tokens:
  803. continue
  804. to_add.append(token)
  805. if replace_additional_special_tokens and len(to_add) > 0:
  806. setattr(self, key, list(to_add))
  807. else:
  808. self._additional_special_tokens.extend(to_add)
  809. added_tokens += to_add
  810. else:
  811. if not isinstance(value, (str, AddedToken)):
  812. raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance")
  813. if isinstance(value, (str)):
  814. # for legacy purpose we default to stripping. `False` depends on this
  815. value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True)
  816. if isinstance(value, AddedToken):
  817. setattr(self, key, value)
  818. if value not in added_tokens:
  819. added_tokens.append(value)
  820. # if we are adding tokens that were not part of the vocab, we ought to add them
  821. added_tokens = self.add_tokens(added_tokens, special_tokens=True)
  822. return added_tokens
  823. def add_tokens(
  824. self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
  825. ) -> int:
  826. """
  827. Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from the length of the current vocabulary and will be isolated before the tokenization
  829. algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore
  830. not treated in the same way.
  831. Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix
  832. of the model so that its embedding matrix matches the tokenizer.
  833. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
  834. Args:
  835. new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`):
  836. Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string
  837. token to let you personalize its behavior: whether this token should only match against a single word,
  838. whether this token should strip all potential whitespaces on the left side, whether this token should
  839. strip all potential whitespaces on the right side, etc.
  840. special_tokens (`bool`, *optional*, defaults to `False`):
Can be used to specify if the token is a special token. This mostly changes the normalization behavior
(special tokens like CLS or [MASK] are usually not lower-cased, for instance).
See details for `tokenizers.AddedToken` in the HuggingFace tokenizers library.
  844. Returns:
  845. `int`: Number of tokens added to the vocabulary.
  846. Examples:
  847. ```python
  848. # Let's see how to increase the vocabulary of Bert model and tokenizer
  849. tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
  850. model = BertModel.from_pretrained("google-bert/bert-base-uncased")
  851. num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
  852. print("We have added", num_added_toks, "tokens")
# Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
  854. model.resize_token_embeddings(len(tokenizer))
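# A hedged sketch (not part of the original example): wrap a token in `tokenizers.AddedToken` to
# control how it is matched, e.g. only as a single word and without stripping surrounding spaces.
from tokenizers import AddedToken

num_added_toks += tokenizer.add_tokens([AddedToken("new_tok3", single_word=True, lstrip=False, rstrip=False)])
model.resize_token_embeddings(len(tokenizer))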
  855. ```"""
  856. if not new_tokens:
  857. return 0
  858. if not isinstance(new_tokens, (list, tuple)):
  859. new_tokens = [new_tokens]
  860. return self._add_tokens(new_tokens, special_tokens=special_tokens)
  861. def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
  862. raise NotImplementedError
  863. @property
  864. def bos_token(self) -> str:
  865. """
  866. `str`: Beginning of sentence token. Log an error if used while not having been set.
  867. """
  868. if self._bos_token is None:
  869. if self.verbose:
  870. logger.error("Using bos_token, but it is not set yet.")
  871. return None
  872. return str(self._bos_token)
  873. @property
  874. def eos_token(self) -> str:
  875. """
  876. `str`: End of sentence token. Log an error if used while not having been set.
  877. """
  878. if self._eos_token is None:
  879. if self.verbose:
  880. logger.error("Using eos_token, but it is not set yet.")
  881. return None
  882. return str(self._eos_token)
  883. @property
  884. def unk_token(self) -> str:
  885. """
  886. `str`: Unknown token. Log an error if used while not having been set.
  887. """
  888. if self._unk_token is None:
  889. if self.verbose:
  890. logger.error("Using unk_token, but it is not set yet.")
  891. return None
  892. return str(self._unk_token)
  893. @property
  894. def sep_token(self) -> str:
  895. """
  896. `str`: Separation token, to separate context and query in an input sequence. Log an error if used while not
  897. having been set.
  898. """
  899. if self._sep_token is None:
  900. if self.verbose:
  901. logger.error("Using sep_token, but it is not set yet.")
  902. return None
  903. return str(self._sep_token)
  904. @property
  905. def pad_token(self) -> str:
  906. """
  907. `str`: Padding token. Log an error if used while not having been set.
  908. """
  909. if self._pad_token is None:
  910. if self.verbose:
  911. logger.error("Using pad_token, but it is not set yet.")
  912. return None
  913. return str(self._pad_token)
  914. @property
  915. def cls_token(self) -> str:
  916. """
  917. `str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full
  918. depth of the model. Log an error if used while not having been set.
  919. """
  920. if self._cls_token is None:
  921. if self.verbose:
  922. logger.error("Using cls_token, but it is not set yet.")
  923. return None
  924. return str(self._cls_token)
  925. @property
  926. def mask_token(self) -> str:
  927. """
  928. `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
  929. having been set.
  930. """
  931. if self._mask_token is None:
  932. if self.verbose:
  933. logger.error("Using mask_token, but it is not set yet.")
  934. return None
  935. return str(self._mask_token)
  936. @property
  937. def additional_special_tokens(self) -> List[str]:
  938. """
  939. `List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been
  940. set.
  941. """
  942. if self._additional_special_tokens is None:
  943. if self.verbose:
  944. logger.error("Using additional_special_tokens, but it is not set yet.")
  945. return None
  946. return [str(tok) for tok in self._additional_special_tokens]
  947. @bos_token.setter
  948. def bos_token(self, value):
  949. if not isinstance(value, (str, AddedToken)) and value is not None:
  950. raise ValueError("Cannot set a non-string value as the BOS token")
  951. self._bos_token = value
  952. @eos_token.setter
  953. def eos_token(self, value):
  954. if not isinstance(value, (str, AddedToken)) and value is not None:
  955. raise ValueError("Cannot set a non-string value as the EOS token")
  956. self._eos_token = value
  957. @unk_token.setter
  958. def unk_token(self, value):
  959. if not isinstance(value, (str, AddedToken)) and value is not None:
  960. raise ValueError("Cannot set a non-string value as the UNK token")
  961. self._unk_token = value
  962. @sep_token.setter
  963. def sep_token(self, value):
  964. if not isinstance(value, (str, AddedToken)) and value is not None:
  965. raise ValueError("Cannot set a non-string value as the SEP token")
  966. self._sep_token = value
  967. @pad_token.setter
  968. def pad_token(self, value):
  969. if not isinstance(value, (str, AddedToken)) and value is not None:
  970. raise ValueError("Cannot set a non-string value as the PAD token")
  971. self._pad_token = value
  972. @cls_token.setter
  973. def cls_token(self, value):
  974. if not isinstance(value, (str, AddedToken)) and value is not None:
  975. raise ValueError("Cannot set a non-string value as the CLS token")
  976. self._cls_token = value
  977. @mask_token.setter
  978. def mask_token(self, value):
  979. if not isinstance(value, (str, AddedToken)) and value is not None:
  980. raise ValueError("Cannot set a non-string value as the MASK token")
  981. self._mask_token = value
  982. @additional_special_tokens.setter
  983. def additional_special_tokens(self, value):
  984. self._additional_special_tokens = value if value is not None else None
  985. @property
  986. def bos_token_id(self) -> Optional[int]:
  987. """
  988. `Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not
  989. been set.
  990. """
  991. if self._bos_token is None:
  992. return None
  993. return self.convert_tokens_to_ids(self.bos_token)
  994. @property
  995. def eos_token_id(self) -> Optional[int]:
  996. """
  997. `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
  998. set.
  999. """
  1000. if self._eos_token is None:
  1001. return None
  1002. return self.convert_tokens_to_ids(self.eos_token)
  1003. @property
  1004. def unk_token_id(self) -> Optional[int]:
  1005. """
  1006. `Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set.
  1007. """
  1008. if self._unk_token is None:
  1009. return None
  1010. return self.convert_tokens_to_ids(self.unk_token)
  1011. @property
  1012. def sep_token_id(self) -> Optional[int]:
  1013. """
  1014. `Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
  1015. sequence. Returns `None` if the token has not been set.
  1016. """
  1017. if self._sep_token is None:
  1018. return None
  1019. return self.convert_tokens_to_ids(self.sep_token)
  1020. @property
  1021. def pad_token_id(self) -> Optional[int]:
  1022. """
  1023. `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
  1024. """
  1025. if self._pad_token is None:
  1026. return None
  1027. return self.convert_tokens_to_ids(self.pad_token)
  1028. @property
  1029. def pad_token_type_id(self) -> int:
  1030. """
  1031. `int`: Id of the padding token type in the vocabulary.
  1032. """
  1033. return self._pad_token_type_id
  1034. @property
  1035. def cls_token_id(self) -> Optional[int]:
  1036. """
  1037. `Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence
  1038. leveraging self-attention along the full depth of the model.
  1039. Returns `None` if the token has not been set.
  1040. """
  1041. if self._cls_token is None:
  1042. return None
  1043. return self.convert_tokens_to_ids(self.cls_token)
  1044. @property
  1045. def mask_token_id(self) -> Optional[int]:
  1046. """
  1047. `Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
  1048. modeling. Returns `None` if the token has not been set.
  1049. """
  1050. if self._mask_token is None:
  1051. return None
  1052. return self.convert_tokens_to_ids(self.mask_token)
  1053. @property
  1054. def additional_special_tokens_ids(self) -> List[int]:
  1055. """
  1056. `List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having
  1057. been set.
  1058. """
  1059. return self.convert_tokens_to_ids(self.additional_special_tokens)
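# The *_token_id setters below take a vocabulary id, resolve it back to its token string with
# `convert_ids_to_tokens`, and store that string, so the `*_token` and `*_token_id` views stay in sync.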
  1060. @bos_token_id.setter
  1061. def bos_token_id(self, value):
  1062. self._bos_token = self.convert_ids_to_tokens(value) if value is not None else None
  1063. @eos_token_id.setter
  1064. def eos_token_id(self, value):
  1065. self._eos_token = self.convert_ids_to_tokens(value) if value is not None else None
  1066. @unk_token_id.setter
  1067. def unk_token_id(self, value):
  1068. self._unk_token = self.convert_ids_to_tokens(value) if value is not None else None
  1069. @sep_token_id.setter
  1070. def sep_token_id(self, value):
  1071. self._sep_token = self.convert_ids_to_tokens(value) if value is not None else None
  1072. @pad_token_id.setter
  1073. def pad_token_id(self, value):
  1074. self._pad_token = self.convert_ids_to_tokens(value) if value is not None else None
  1075. @cls_token_id.setter
  1076. def cls_token_id(self, value):
  1077. self._cls_token = self.convert_ids_to_tokens(value) if value is not None else None
  1078. @mask_token_id.setter
  1079. def mask_token_id(self, value):
  1080. self._mask_token = self.convert_ids_to_tokens(value) if value is not None else None
  1081. @additional_special_tokens_ids.setter
  1082. def additional_special_tokens_ids(self, values):
  1083. self._additional_special_tokens = [self.convert_ids_to_tokens(value) for value in values]
  1084. @property
  1085. def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
  1086. """
  1087. `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
  1088. `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
  1089. Convert potential tokens of `tokenizers.AddedToken` type to string.
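Example (a minimal sketch; the exact mapping depends on the loaded checkpoint):
```python
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
tokenizer.special_tokens_map
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}
```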
  1090. """
  1091. set_attr = {}
  1092. for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
  1093. attr_value = getattr(self, attr)
  1094. if attr_value:
  1095. set_attr[attr] = attr_value
  1096. return set_attr
  1097. @property
  1098. def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
  1099. """
  1100. `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping
  1101. special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
  1102. Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
  1103. special tokens are tokenized.
  1104. """
  1105. set_attr = {}
  1106. for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
  1107. attr_value = getattr(self, "_" + attr)
  1108. if attr_value:
  1109. set_attr[attr] = attr_value
  1110. return set_attr
  1111. @property
  1112. def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
  1113. """
  1114. `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has
nothing to do with the index of each token. If you want to know the correct indices, check
  1116. `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`.
  1117. Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
  1118. special tokens are tokenized.
  1119. """
  1120. all_tokens = []
  1121. seen = set()
  1122. for value in self.special_tokens_map_extended.values():
  1123. if isinstance(value, (list, tuple)):
  1124. tokens_to_add = [token for token in value if str(token) not in seen]
  1125. else:
  1126. tokens_to_add = [value] if str(value) not in seen else []
  1127. seen.update(map(str, tokens_to_add))
  1128. all_tokens.extend(tokens_to_add)
  1129. return all_tokens
  1130. @property
  1131. def all_special_tokens(self) -> List[str]:
  1132. """
  1133. `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.).
  1134. Convert tokens of `tokenizers.AddedToken` type to string.
  1135. """
  1136. all_toks = [str(s) for s in self.all_special_tokens_extended]
  1137. return all_toks
  1138. @property
  1139. def all_special_ids(self) -> List[int]:
  1140. """
`List[int]`: A list of the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
  1142. """
  1143. all_toks = self.all_special_tokens
  1144. all_ids = self.convert_tokens_to_ids(all_toks)
  1145. return all_ids
  1146. ENCODE_KWARGS_DOCSTRING = r"""
  1147. add_special_tokens (`bool`, *optional*, defaults to `True`):
  1148. Whether or not to add special tokens when encoding the sequences. This will use the underlying
`PreTrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are
automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens
  1151. automatically.
  1152. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
  1153. Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
  1156. - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
  1157. acceptable input length for the model if that argument is not provided.
  1158. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
  1159. lengths).
  1160. truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
  1161. Activates and controls truncation. Accepts the following values:
  1162. - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
  1163. to the maximum acceptable input length for the model if that argument is not provided. This will
  1164. truncate token by token, removing a token from the longest sequence in the pair if a pair of
  1165. sequences (or a batch of pairs) is provided.
  1166. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
  1167. maximum acceptable input length for the model if that argument is not provided. This will only
  1168. truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
  1169. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
  1170. maximum acceptable input length for the model if that argument is not provided. This will only
  1171. truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
  1172. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
  1173. greater than the model maximum admissible input size).
  1174. max_length (`int`, *optional*):
  1175. Controls the maximum length to use by one of the truncation/padding parameters.
  1176. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
  1177. is required by one of the truncation/padding parameters. If the model has no specific maximum input
  1178. length (like XLNet) truncation/padding to a maximum length will be deactivated.
  1179. stride (`int`, *optional*, defaults to 0):
  1180. If set to a number along with `max_length`, the overflowing tokens returned when
  1181. `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
  1182. returned to provide some overlap between truncated and overflowing sequences. The value of this
  1183. argument defines the number of overlapping tokens.
  1184. is_split_into_words (`bool`, *optional*, defaults to `False`):
  1185. Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
  1186. tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
  1187. which it will tokenize. This is useful for NER or token classification.
  1188. pad_to_multiple_of (`int`, *optional*):
  1189. If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated.
  1190. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
  1191. `>= 7.5` (Volta).
  1192. padding_side (`str`, *optional*):
  1193. The side on which the model should have padding applied. Should be selected between ['right', 'left'].
  1194. Default value is picked from the class attribute of the same name.
  1195. return_tensors (`str` or [`~utils.TensorType`], *optional*):
  1196. If set, will return tensors instead of list of python integers. Acceptable values are:
  1197. - `'tf'`: Return TensorFlow `tf.constant` objects.
  1198. - `'pt'`: Return PyTorch `torch.Tensor` objects.
  1199. - `'np'`: Return Numpy `np.ndarray` objects.
  1200. """
  1201. ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
  1202. return_token_type_ids (`bool`, *optional*):
  1203. Whether to return token type IDs. If left to the default, will return the token type IDs according to
  1204. the specific tokenizer's default, defined by the `return_outputs` attribute.
  1205. [What are token type IDs?](../glossary#token-type-ids)
  1206. return_attention_mask (`bool`, *optional*):
  1207. Whether to return the attention mask. If left to the default, will return the attention mask according
  1208. to the specific tokenizer's default, defined by the `return_outputs` attribute.
  1209. [What are attention masks?](../glossary#attention-mask)
  1210. return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
  1211. Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
  1212. of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
  1213. of returning overflowing tokens.
  1214. return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
  1215. Whether or not to return special tokens mask information.
  1216. return_offsets_mapping (`bool`, *optional*, defaults to `False`):
  1217. Whether or not to return `(char_start, char_end)` for each token.
This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
Python's tokenizer, this method will raise `NotImplementedError`.
  1220. return_length (`bool`, *optional*, defaults to `False`):
  1221. Whether or not to return the lengths of the encoded inputs.
  1222. verbose (`bool`, *optional*, defaults to `True`):
  1223. Whether or not to print more information and warnings.
  1224. **kwargs: passed to the `self.tokenize()` method
  1225. Return:
  1226. [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
  1227. - **input_ids** -- List of token ids to be fed to a model.
  1228. [What are input IDs?](../glossary#input-ids)
  1229. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
  1230. if *"token_type_ids"* is in `self.model_input_names`).
  1231. [What are token type IDs?](../glossary#token-type-ids)
  1232. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
  1233. `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
  1234. [What are attention masks?](../glossary#attention-mask)
  1235. - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
  1236. `return_overflowing_tokens=True`).
  1237. - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
  1238. `return_overflowing_tokens=True`).
  1239. - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
  1240. regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
  1241. - **length** -- The length of the inputs (when `return_length=True`)
  1242. """
  1243. INIT_TOKENIZER_DOCSTRING = r"""
  1244. Class attributes (overridden by derived classes)
  1245. - **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
  1246. vocabulary file required by the model, and as associated values, the filename for saving the associated file
  1247. (string).
  1248. - **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
  1249. high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
  1250. low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
  1251. associated pretrained vocabulary file.
  1252. - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
  1253. - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
  1254. Should be `'right'` or `'left'`.
  1255. - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
  1256. applied. Should be `'right'` or `'left'`.
  1257. Args:
  1258. model_max_length (`int`, *optional*):
  1259. The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
  1260. loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
  1261. value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
  1262. default to VERY_LARGE_INTEGER (`int(1e30)`).
  1263. padding_side (`str`, *optional*):
  1264. The side on which the model should have padding applied. Should be selected between ['right', 'left'].
  1265. Default value is picked from the class attribute of the same name.
  1266. truncation_side (`str`, *optional*):
  1267. The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
  1268. Default value is picked from the class attribute of the same name.
  1269. chat_template (`str`, *optional*):
  1270. A Jinja template string that will be used to format lists of chat messages. See
  1271. https://huggingface.co/docs/transformers/chat_templating for a full description.
  1272. model_input_names (`List[string]`, *optional*):
  1273. The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
  1274. `"attention_mask"`). Default value is picked from the class attribute of the same name.
  1275. bos_token (`str` or `tokenizers.AddedToken`, *optional*):
  1276. A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
  1277. `self.bos_token_id`.
  1278. eos_token (`str` or `tokenizers.AddedToken`, *optional*):
  1279. A special token representing the end of a sentence. Will be associated to `self.eos_token` and
  1280. `self.eos_token_id`.
  1281. unk_token (`str` or `tokenizers.AddedToken`, *optional*):
  1282. A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
  1283. `self.unk_token_id`.
  1284. sep_token (`str` or `tokenizers.AddedToken`, *optional*):
  1285. A special token separating two different sentences in the same input (used by BERT for instance). Will be
  1286. associated to `self.sep_token` and `self.sep_token_id`.
  1287. pad_token (`str` or `tokenizers.AddedToken`, *optional*):
  1288. A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
  1289. attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
  1290. cls_token (`str` or `tokenizers.AddedToken`, *optional*):
  1291. A special token representing the class of the input (used by BERT for instance). Will be associated to
  1292. `self.cls_token` and `self.cls_token_id`.
  1293. mask_token (`str` or `tokenizers.AddedToken`, *optional*):
  1294. A special token representing a masked token (used by masked-language modeling pretraining objectives, like
  1295. BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
  1296. additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding with
`skip_special_tokens` set to `True`. If they are not part of the vocabulary, they will be added at the end
  1299. of the vocabulary.
  1300. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
  1301. Whether or not the model should cleanup the spaces that were added when splitting the input text during the
  1302. tokenization process.
  1303. split_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the special tokens should be split during the tokenization process. Passing this affects the
internal state of the tokenizer. The default behavior is to not split special tokens. This means that if
`<s>` is the `bos_token`, then `tokenizer.tokenize("<s>") = ['<s>']`. Otherwise, if
`split_special_tokens=True`, then `tokenizer.tokenize("<s>")` will give `['<', 's', '>']`.
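Example of overriding a few of these arguments when loading a tokenizer (a minimal sketch; the checkpoint
name is only illustrative):
```python
tokenizer = AutoTokenizer.from_pretrained(
    "google-bert/bert-base-uncased", model_max_length=128, padding_side="left"
)
```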
  1308. """
  1309. @add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
  1310. class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
  1311. """
  1312. Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
Handles shared (mostly boilerplate) methods for those two classes.
  1314. """
  1315. vocab_files_names: Dict[str, str] = {}
  1316. pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
  1317. _auto_class: Optional[str] = None
  1318. # first name has to correspond to main model input name
  1319. # to make sure `tokenizer.pad(...)` works correctly
  1320. model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"]
  1321. padding_side: str = "right"
  1322. truncation_side: str = "right"
  1323. slow_tokenizer_class = None
  1324. def __init__(self, **kwargs):
  1325. # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
  1326. self.init_inputs = ()
  1327. for key in kwargs:
  1328. if hasattr(self, key) and callable(getattr(self, key)):
  1329. raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}")
  1330. self.init_kwargs = copy.deepcopy(kwargs)
  1331. self.name_or_path = kwargs.pop("name_or_path", "")
  1332. self._processor_class = kwargs.pop("processor_class", None)
  1333. # For backward compatibility we fallback to set model_max_length from max_len if provided
  1334. model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
  1335. self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
  1336. # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it
  1337. # is changed.
  1338. self.padding_side = kwargs.pop("padding_side", self.padding_side)
  1339. if self.padding_side not in ["right", "left"]:
  1340. raise ValueError(
  1341. f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
  1342. )
  1343. self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
  1344. if self.truncation_side not in ["right", "left"]:
  1345. raise ValueError(
  1346. f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
  1347. )
  1348. self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
  1349. # By default, cleaning tokenization spaces for both fast and slow tokenizers
  1350. self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)
  1351. # By default, do not split special tokens for both fast and slow tokenizers
  1352. self.split_special_tokens = kwargs.pop("split_special_tokens", False)
self.deprecation_warnings = {} # Used to store when we have already noticed a deprecation warning (avoids over-logging).
  1354. self._in_target_context_manager = False
  1355. # Stores a Jinja template that formats chat histories into tokenizable strings
  1356. self.chat_template = kwargs.pop("chat_template", None)
  1357. if isinstance(self.chat_template, (list, tuple)):
  1358. # Chat templates are stored as lists of dicts with fixed key names,
  1359. # we reconstruct that into a single dict while loading them.
  1360. self.chat_template = {template["name"]: template["template"] for template in self.chat_template}
  1361. super().__init__(**kwargs)
  1362. @property
  1363. def max_len_single_sentence(self) -> int:
  1364. """
  1365. `int`: The maximum length of a sentence that can be fed to the model.
  1366. """
  1367. return self.model_max_length - self.num_special_tokens_to_add(pair=False)
  1368. @property
  1369. def max_len_sentences_pair(self) -> int:
  1370. """
  1371. `int`: The maximum combined length of a pair of sentences that can be fed to the model.
  1372. """
  1373. return self.model_max_length - self.num_special_tokens_to_add(pair=True)
  1374. @max_len_single_sentence.setter
  1375. def max_len_single_sentence(self, value) -> int:
# For backward compatibility, allow trying to set 'max_len_single_sentence'.
  1377. if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
  1378. if not self.deprecation_warnings.get("max_len_single_sentence", False):
  1379. logger.warning(
  1380. "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
  1381. )
  1382. self.deprecation_warnings["max_len_single_sentence"] = True
  1383. else:
  1384. raise ValueError(
  1385. "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
  1386. )
  1387. @max_len_sentences_pair.setter
  1388. def max_len_sentences_pair(self, value) -> int:
# For backward compatibility, allow trying to set 'max_len_sentences_pair'.
  1390. if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
  1391. if not self.deprecation_warnings.get("max_len_sentences_pair", False):
  1392. logger.warning(
  1393. "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up."
  1394. )
  1395. self.deprecation_warnings["max_len_sentences_pair"] = True
  1396. else:
  1397. raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
  1398. def _set_processor_class(self, processor_class: str):
  1399. """Sets processor class as an attribute."""
  1400. self._processor_class = processor_class
  1401. @property
  1402. def added_tokens_decoder(self) -> Dict[int, AddedToken]:
  1403. raise NotImplementedError()
  1404. def __repr__(self) -> str:
  1405. added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()])
  1406. return (
  1407. f"{self.__class__.__name__}(name_or_path='{self.name_or_path}',"
  1408. f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast},"
  1409. f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}',"
  1410. f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}), "
  1411. " added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}"
  1412. )
  1413. def __len__(self) -> int:
  1414. raise NotImplementedError()
  1415. def get_vocab(self) -> Dict[str, int]:
  1416. """
  1417. Returns the vocabulary as a dictionary of token to index.
  1418. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
  1419. vocab.
  1420. Returns:
  1421. `Dict[str, int]`: The vocabulary.
  1422. """
  1423. raise NotImplementedError()
  1424. def apply_chat_template(
  1425. self,
  1426. conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]]],
  1427. tools: Optional[List[Dict]] = None,
  1428. documents: Optional[List[Dict[str, str]]] = None,
  1429. chat_template: Optional[str] = None,
  1430. add_generation_prompt: bool = False,
  1431. continue_final_message: bool = False,
  1432. tokenize: bool = True,
  1433. padding: bool = False,
  1434. truncation: bool = False,
  1435. max_length: Optional[int] = None,
  1436. return_tensors: Optional[Union[str, TensorType]] = None,
  1437. return_dict: bool = False,
  1438. return_assistant_tokens_mask: bool = False,
  1439. tokenizer_kwargs: Optional[Dict[str, Any]] = None,
  1440. **kwargs,
  1441. ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
  1442. """
  1443. Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
  1444. ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to
  1445. determine the format and control tokens to use when converting.
  1446. Args:
  1447. conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts
  1448. with "role" and "content" keys, representing the chat history so far.
  1449. tools (`List[Dict]`, *optional*):
  1450. A list of tools (callable functions) that will be accessible to the model. If the template does not
  1451. support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
  1452. giving the name, description and argument types for the tool. See our
  1453. [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
  1454. for more information.
  1455. documents (`List[Dict[str, str]]`, *optional*):
  1456. A list of dicts representing documents that will be accessible to the model if it is performing RAG
  1457. (retrieval-augmented generation). If the template does not support RAG, this argument will have no
  1458. effect. We recommend that each document should be a dict containing "title" and "text" keys. Please
  1459. see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG)
  1460. for examples of passing documents with chat templates.
  1461. chat_template (`str`, *optional*):
  1462. A Jinja template to use for this conversion. It is usually not necessary to pass anything to this
  1463. argument, as the model's template will be used by default.
  1464. add_generation_prompt (bool, *optional*):
  1465. If this is set, a prompt with the token(s) that indicate
  1466. the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model.
  1467. Note that this argument will be passed to the chat template, and so it must be supported in the
  1468. template for this argument to have any effect.
  1469. continue_final_message (bool, *optional*):
  1470. If this is set, the chat will be formatted so that the final
  1471. message in the chat is open-ended, without any EOS tokens. The model will continue this message
  1472. rather than starting a new one. This allows you to "prefill" part of
  1473. the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
  1474. tokenize (`bool`, defaults to `True`):
  1475. Whether to tokenize the output. If `False`, the output will be a string.
  1476. padding (`bool`, defaults to `False`):
  1477. Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.
  1478. truncation (`bool`, defaults to `False`):
  1479. Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
  1480. max_length (`int`, *optional*):
  1481. Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
  1482. not specified, the tokenizer's `max_length` attribute will be used as a default.
  1483. return_tensors (`str` or [`~utils.TensorType`], *optional*):
  1484. If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
  1485. values are:
  1486. - `'tf'`: Return TensorFlow `tf.Tensor` objects.
  1487. - `'pt'`: Return PyTorch `torch.Tensor` objects.
  1488. - `'np'`: Return NumPy `np.ndarray` objects.
  1489. - `'jax'`: Return JAX `jnp.ndarray` objects.
  1490. return_dict (`bool`, defaults to `False`):
  1491. Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
tokenizer_kwargs (`Dict[str, Any]`, *optional*): Additional kwargs to pass to the tokenizer.
  1493. return_assistant_tokens_mask (`bool`, defaults to `False`):
  1494. Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant,
  1495. the mask will contain 1. For user and system tokens, the mask will contain 0.
  1496. This functionality is only available for chat templates that support it via the `{% generation %}` keyword.
  1497. **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template.
  1498. Returns:
  1499. `Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This
  1500. output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is
  1501. set, will return a dict of tokenizer outputs instead.
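Example (a hedged sketch; the rendered text depends entirely on the model's chat template):
```python
chat = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello, how are you?"},
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
input_ids = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt")
```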
  1502. """
  1503. if return_dict and not tokenize:
  1504. raise ValueError(
  1505. "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
  1506. "of tokenizer outputs to return."
  1507. )
  1508. if return_assistant_tokens_mask and not return_dict:
  1509. raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`")
  1510. if tokenizer_kwargs is None:
  1511. tokenizer_kwargs = {}
  1512. chat_template = self.get_chat_template(chat_template, tools)
  1513. if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template):
  1514. logger.warning_once(
  1515. "return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword."
  1516. )
  1517. # Compilation function uses a cache to avoid recompiling the same template
  1518. compiled_template = _compile_jinja_template(chat_template)
  1519. if isinstance(conversation, (list, tuple)) and (
  1520. isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages")
  1521. ):
  1522. conversations = conversation
  1523. is_batched = True
  1524. else:
  1525. conversations = [conversation]
  1526. is_batched = False
  1527. if continue_final_message:
  1528. if add_generation_prompt:
  1529. raise ValueError(
  1530. "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead."
  1531. )
  1532. if return_assistant_tokens_mask:
  1533. raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.")
  1534. # We accept either JSON schemas or functions for tools. If we get functions, we convert them to schemas
  1535. if tools is not None:
  1536. tool_schemas = []
  1537. for tool in tools:
  1538. if isinstance(tool, dict):
  1539. tool_schemas.append(tool)
  1540. elif isfunction(tool):
  1541. tool_schemas.append(get_json_schema(tool))
  1542. else:
  1543. raise ValueError(
  1544. "Tools should either be a JSON schema, or a callable function with type hints "
  1545. "and a docstring suitable for auto-conversion to a schema."
  1546. )
  1547. else:
  1548. tool_schemas = None
  1549. if documents is not None:
  1550. for document in documents:
  1551. if not isinstance(document, dict):
  1552. raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!")
  1553. rendered = []
  1554. all_generation_indices = []
  1555. template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present
  1556. for chat in conversations:
  1557. if hasattr(chat, "messages"):
  1558. # Indicates it's a Conversation object
  1559. chat = chat.messages
  1560. if return_assistant_tokens_mask:
  1561. rendered_chat, generation_indices = _render_with_assistant_indices(
  1562. compiled_template=compiled_template,
  1563. messages=chat,
  1564. tools=tool_schemas,
  1565. documents=documents,
  1566. add_generation_prompt=add_generation_prompt,
  1567. **template_kwargs,
  1568. )
  1569. all_generation_indices.append(generation_indices)
  1570. else:
  1571. rendered_chat = compiled_template.render(
  1572. messages=chat,
  1573. tools=tool_schemas,
  1574. documents=documents,
  1575. add_generation_prompt=add_generation_prompt,
  1576. **template_kwargs,
  1577. )
  1578. if continue_final_message:
  1579. final_message = chat[-1]["content"]
  1580. if isinstance(final_message, (list, tuple)):
  1581. final_message = final_message[-1]["text"]
  1582. final_message = final_message.strip()
  1583. rendered_chat = rendered_chat[: rendered_chat.rindex(final_message) + len(final_message)].rstrip()
  1584. rendered.append(rendered_chat)
  1585. if not is_batched:
  1586. rendered = rendered[0]
  1587. if tokenize:
  1588. out = self(
  1589. rendered,
  1590. padding=padding,
  1591. truncation=truncation,
  1592. max_length=max_length,
  1593. add_special_tokens=False,
  1594. return_tensors=return_tensors,
  1595. **tokenizer_kwargs,
  1596. )
  1597. if return_dict:
  1598. if return_assistant_tokens_mask:
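# Map the character spans recorded while rendering `{% generation %}` blocks back to token
# indices with `char_to_token`, and mark those tokens with 1 in the per-sequence mask.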
  1599. assistant_masks = []
  1600. if is_batched or return_tensors:
  1601. input_ids = out["input_ids"]
  1602. else:
  1603. input_ids = [out["input_ids"]]
  1604. for i in range(len(input_ids)):
  1605. current_mask = [0] * len(input_ids[i])
  1606. for assistant_start_char, assistant_end_char in all_generation_indices[i]:
  1607. start_token = out.char_to_token(i, assistant_start_char)
  1608. end_token = out.char_to_token(i, assistant_end_char - 1)
  1609. if start_token is None:
  1610. # start_token is out of bounds maybe due to truncation.
  1611. break
  1612. for token_id in range(start_token, end_token + 1 if end_token else len(input_ids)):
  1613. current_mask[token_id] = 1
  1614. assistant_masks.append(current_mask)
  1615. out["assistant_masks"] = assistant_masks if is_batched else assistant_masks[0]
  1616. return out
  1617. else:
  1618. return out["input_ids"]
  1619. else:
  1620. return rendered
  1621. def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[List[Dict]] = None) -> str:
  1622. """
  1623. Retrieve the chat template string used for tokenizing chat messages. This template is used
  1624. internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat
  1625. template for better generation tracking.
  1626. Args:
  1627. chat_template (`str`, *optional*):
  1628. A Jinja template or the name of a template to use for this conversion.
  1629. It is usually not necessary to pass anything to this argument,
  1630. as the model's template will be used by default.
  1631. tools (`List[Dict]`, *optional*):
  1632. A list of tools (callable functions) that will be accessible to the model. If the template does not
  1633. support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
  1634. giving the name, description and argument types for the tool. See our
  1635. [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
  1636. for more information.
  1637. Returns:
  1638. `str`: The chat template string.
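Example (a sketch; assumes the tokenizer ships a chat template, and a `"tool_use"` entry if it stores
several named templates):
```python
default_template = tokenizer.get_chat_template()
tool_template = tokenizer.get_chat_template("tool_use")
```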
  1639. """
  1640. # First, handle the cases when the model has a dict of multiple templates
  1641. if isinstance(self.chat_template, dict):
  1642. template_dict = self.chat_template
  1643. if chat_template is not None and chat_template in template_dict:
  1644. # The user can pass the name of a template to the chat template argument instead of an entire template
  1645. chat_template = template_dict[chat_template]
  1646. elif chat_template is None:
  1647. if tools is not None and "tool_use" in template_dict:
  1648. chat_template = template_dict["tool_use"]
  1649. elif "default" in template_dict:
  1650. chat_template = template_dict["default"]
  1651. else:
  1652. raise ValueError(
  1653. "This model has multiple chat templates with no default specified! Please either pass a chat "
  1654. "template or the name of the template you wish to use to the `chat_template` argument. Available "
  1655. f"template names are {sorted(template_dict.keys())}."
  1656. )
  1657. elif chat_template is None:
  1658. # These are the cases when the model has a single template
  1659. # priority: `chat_template` argument > `tokenizer.chat_template`
  1660. if self.chat_template is not None:
  1661. chat_template = self.chat_template
  1662. else:
  1663. raise ValueError(
  1664. "Cannot use chat template functions because tokenizer.chat_template is not set and no template "
  1665. "argument was passed! For information about writing templates and setting the "
  1666. "tokenizer.chat_template attribute, please see the documentation at "
  1667. "https://huggingface.co/docs/transformers/main/en/chat_templating"
  1668. )
  1669. return chat_template
  1670. @classmethod
  1671. def from_pretrained(
  1672. cls,
  1673. pretrained_model_name_or_path: Union[str, os.PathLike],
  1674. *init_inputs,
  1675. cache_dir: Optional[Union[str, os.PathLike]] = None,
  1676. force_download: bool = False,
  1677. local_files_only: bool = False,
  1678. token: Optional[Union[str, bool]] = None,
  1679. revision: str = "main",
  1680. trust_remote_code=False,
  1681. **kwargs,
  1682. ):
  1683. r"""
  1684. Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
  1685. tokenizer.
  1686. Args:
  1687. pretrained_model_name_or_path (`str` or `os.PathLike`):
  1688. Can be either:
  1689. - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
  1690. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
  1691. using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
  1692. `./my_model_directory/`.
  1693. - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
  1694. file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
  1695. `./my_model_directory/vocab.txt`.
  1696. cache_dir (`str` or `os.PathLike`, *optional*):
  1697. Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
  1698. standard cache should not be used.
  1699. force_download (`bool`, *optional*, defaults to `False`):
  1700. Whether or not to force the (re-)download the vocabulary files and override the cached versions if they
  1701. exist.
  1702. resume_download:
  1703. Deprecated and ignored. All downloads are now resumed by default when possible.
  1704. Will be removed in v5 of Transformers.
  1705. proxies (`Dict[str, str]`, *optional*):
  1706. A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
  1707. 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
  1708. token (`str` or *bool*, *optional*):
  1709. The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
  1710. when running `huggingface-cli login` (stored in `~/.huggingface`).
  1711. local_files_only (`bool`, *optional*, defaults to `False`):
  1712. Whether or not to only rely on local files and not to attempt to download any files.
  1713. revision (`str`, *optional*, defaults to `"main"`):
  1714. The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
  1715. git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
  1716. identifier allowed by git.
  1717. subfolder (`str`, *optional*):
  1718. In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
  1719. facebook/rag-token-base), specify it here.
  1720. inputs (additional positional arguments, *optional*):
  1721. Will be passed along to the Tokenizer `__init__` method.
  1722. trust_remote_code (`bool`, *optional*, defaults to `False`):
  1723. Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
  1724. should only be set to `True` for repositories you trust and in which you have read the code, as it will
  1725. execute code present on the Hub on your local machine.
  1726. kwargs (additional keyword arguments, *optional*):
  1727. Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
  1728. `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
  1729. `additional_special_tokens`. See parameters in the `__init__` for more details.
  1730. <Tip>
  1731. Passing `token=True` is required when you want to use a private model.
  1732. </Tip>
  1733. Examples:
  1734. ```python
# We can't directly instantiate the base class *PreTrainedTokenizerBase*, so let's show our examples with a derived class: BertTokenizer
  1736. # Download vocabulary from huggingface.co and cache.
  1737. tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
  1738. # Download vocabulary from huggingface.co (user-uploaded) and cache.
  1739. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
  1740. # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
  1741. tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")
  1742. # If the tokenizer uses a single vocabulary file, you can point directly to this file
  1743. tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")
  1744. # You can link tokens to special vocabulary when instantiating
  1745. tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>")
  1746. # You should be sure '<unk>' is in the vocabulary when doing that.
  1747. # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
  1748. assert tokenizer.unk_token == "<unk>"
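# A hedged sketch (not part of the original example): you can also pin a specific git revision of the
# repository; "main" is the default and is shown here purely for illustration.
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", revision="main")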
  1749. ```"""
  1750. resume_download = kwargs.pop("resume_download", None)
  1751. proxies = kwargs.pop("proxies", None)
  1752. use_auth_token = kwargs.pop("use_auth_token", None)
  1753. subfolder = kwargs.pop("subfolder", None)
  1754. from_pipeline = kwargs.pop("_from_pipeline", None)
  1755. from_auto_class = kwargs.pop("_from_auto", False)
  1756. commit_hash = kwargs.pop("_commit_hash", None)
  1757. gguf_file = kwargs.get("gguf_file", None)
  1758. if use_auth_token is not None:
  1759. warnings.warn(
  1760. "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
  1761. FutureWarning,
  1762. )
  1763. if token is not None:
  1764. raise ValueError(
  1765. "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
  1766. )
  1767. token = use_auth_token
  1768. user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__}
  1769. if from_pipeline is not None:
  1770. user_agent["using_pipeline"] = from_pipeline
  1771. if is_offline_mode() and not local_files_only:
  1772. logger.info("Offline mode: forcing local_files_only=True")
  1773. local_files_only = True
  1774. pretrained_model_name_or_path = str(pretrained_model_name_or_path)
  1775. vocab_files = {}
  1776. init_configuration = {}
  1777. is_local = os.path.isdir(pretrained_model_name_or_path)
  1778. single_file_id = None
  1779. if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
  1780. if len(cls.vocab_files_names) > 1 and not gguf_file:
  1781. raise ValueError(
  1782. f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
  1783. "supported for this tokenizer. Use a model identifier or the path to a directory instead."
  1784. )
  1785. warnings.warn(
  1786. f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
  1787. "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
  1788. FutureWarning,
  1789. )
  1790. file_id = list(cls.vocab_files_names.keys())[0]
  1791. vocab_files[file_id] = pretrained_model_name_or_path
  1792. single_file_id = file_id
  1793. else:
  1794. if gguf_file:
  1795. vocab_files["vocab_file"] = gguf_file
  1796. else:
  1797. # At this point pretrained_model_name_or_path is either a directory or a model identifier name
  1798. additional_files_names = {
  1799. "added_tokens_file": ADDED_TOKENS_FILE, # kept only for legacy
  1800. "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, # kept only for legacy
  1801. "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
1802. # tokenizer_file is used to initialize a slow tokenizer from a fast one. Properly copy the `added_tokens` instead of adding them in a random order
  1803. "tokenizer_file": FULL_TOKENIZER_FILE,
  1804. }
  1805. vocab_files = {**cls.vocab_files_names, **additional_files_names}
  1806. if "tokenizer_file" in vocab_files:
  1807. # Try to get the tokenizer config to see if there are versioned tokenizer files.
  1808. fast_tokenizer_file = FULL_TOKENIZER_FILE
  1809. resolved_config_file = cached_file(
  1810. pretrained_model_name_or_path,
  1811. TOKENIZER_CONFIG_FILE,
  1812. cache_dir=cache_dir,
  1813. force_download=force_download,
  1814. resume_download=resume_download,
  1815. proxies=proxies,
  1816. token=token,
  1817. revision=revision,
  1818. local_files_only=local_files_only,
  1819. subfolder=subfolder,
  1820. user_agent=user_agent,
  1821. _raise_exceptions_for_gated_repo=False,
  1822. _raise_exceptions_for_missing_entries=False,
  1823. _raise_exceptions_for_connection_errors=False,
  1824. _commit_hash=commit_hash,
  1825. )
  1826. commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
  1827. if resolved_config_file is not None:
  1828. with open(resolved_config_file, encoding="utf-8") as reader:
  1829. tokenizer_config = json.load(reader)
  1830. if "fast_tokenizer_files" in tokenizer_config:
  1831. fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"])
  1832. vocab_files["tokenizer_file"] = fast_tokenizer_file
  1833. # Get files from url, cache, or disk depending on the case
  1834. resolved_vocab_files = {}
  1835. unresolved_files = []
  1836. for file_id, file_path in vocab_files.items():
  1837. if file_path is None:
  1838. resolved_vocab_files[file_id] = None
  1839. elif single_file_id == file_id:
  1840. if os.path.isfile(file_path):
  1841. resolved_vocab_files[file_id] = file_path
  1842. elif is_remote_url(file_path):
  1843. resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies)
  1844. else:
  1845. resolved_vocab_files[file_id] = cached_file(
  1846. pretrained_model_name_or_path,
  1847. file_path,
  1848. cache_dir=cache_dir,
  1849. force_download=force_download,
  1850. proxies=proxies,
  1851. resume_download=resume_download,
  1852. local_files_only=local_files_only,
  1853. token=token,
  1854. user_agent=user_agent,
  1855. revision=revision,
  1856. subfolder=subfolder,
  1857. _raise_exceptions_for_gated_repo=False,
  1858. _raise_exceptions_for_missing_entries=False,
  1859. _raise_exceptions_for_connection_errors=False,
  1860. _commit_hash=commit_hash,
  1861. )
  1862. commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)
  1863. if len(unresolved_files) > 0:
  1864. logger.info(
1865. f"Can't load the following files from cache: {unresolved_files} and cannot check if these "
  1866. "files are necessary for the tokenizer to operate."
  1867. )
  1868. # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
  1869. # loaded directly from the GGUF file.
  1870. if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file:
  1871. raise EnvironmentError(
  1872. f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
  1873. "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
  1874. f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
  1875. f"containing all relevant files for a {cls.__name__} tokenizer."
  1876. )
  1877. for file_id, file_path in vocab_files.items():
  1878. if file_id not in resolved_vocab_files:
  1879. continue
  1880. if is_local:
  1881. logger.info(f"loading file {file_path}")
  1882. else:
  1883. logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
  1884. return cls._from_pretrained(
  1885. resolved_vocab_files,
  1886. pretrained_model_name_or_path,
  1887. init_configuration,
  1888. *init_inputs,
  1889. token=token,
  1890. cache_dir=cache_dir,
  1891. local_files_only=local_files_only,
  1892. _commit_hash=commit_hash,
  1893. _is_local=is_local,
  1894. trust_remote_code=trust_remote_code,
  1895. **kwargs,
  1896. )
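# A short usage sketch of the resolution paths handled above; the revision, local paths
# and the subfolder repo below are placeholders rather than required values.
from transformers import AutoTokenizer

# Regular Hub download (or cache hit), optionally pinned to a revision.
tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased", revision="main")
# Fully local / offline usage: only the cache (or a local directory) is consulted,
# mirroring the `is_offline_mode()` handling above.
tok_offline = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased", local_files_only=True)
# Files stored under a subfolder of a repo (hypothetical repo layout):
# tok_sub = AutoTokenizer.from_pretrained("some-org/some-repo", subfolder="tokenizer")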
  1897. @classmethod
  1898. def _from_pretrained(
  1899. cls,
  1900. resolved_vocab_files,
  1901. pretrained_model_name_or_path,
  1902. init_configuration,
  1903. *init_inputs,
  1904. token=None,
  1905. cache_dir=None,
  1906. local_files_only=False,
  1907. _commit_hash=None,
  1908. _is_local=False,
  1909. trust_remote_code=False,
  1910. **kwargs,
  1911. ):
  1912. # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
  1913. # file or if `from_slow` is set to True.
  1914. from_slow = kwargs.get("from_slow", False)
  1915. gguf_file = kwargs.get("gguf_file", None)
  1916. has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None
  1917. # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
  1918. # loaded directly from the GGUF file.
  1919. if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file:
  1920. slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
  1921. copy.deepcopy(resolved_vocab_files),
  1922. pretrained_model_name_or_path,
  1923. copy.deepcopy(init_configuration),
  1924. *init_inputs,
  1925. token=token,
  1926. cache_dir=cache_dir,
  1927. local_files_only=local_files_only,
  1928. _commit_hash=_commit_hash,
  1929. **(copy.deepcopy(kwargs)),
  1930. )
  1931. else:
  1932. slow_tokenizer = None
  1933. # Prepare tokenizer initialization kwargs
1934. # Did we save some inputs and kwargs to reload?
  1935. tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
  1936. if tokenizer_config_file is not None:
  1937. with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
  1938. init_kwargs = json.load(tokenizer_config_handle)
  1939. # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
  1940. config_tokenizer_class = init_kwargs.get("tokenizer_class")
  1941. init_kwargs.pop("tokenizer_class", None)
  1942. if not has_tokenizer_file:
  1943. init_kwargs.pop("tokenizer_file", None)
  1944. saved_init_inputs = init_kwargs.pop("init_inputs", ())
  1945. if not init_inputs:
  1946. init_inputs = saved_init_inputs
  1947. else:
  1948. config_tokenizer_class = None
  1949. init_kwargs = init_configuration
  1950. if not _is_local:
  1951. if "auto_map" in init_kwargs:
1952. # For backward compatibility with the old format.
  1953. if isinstance(init_kwargs["auto_map"], (tuple, list)):
  1954. init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]}
  1955. init_kwargs["auto_map"] = add_model_info_to_auto_map(
  1956. init_kwargs["auto_map"], pretrained_model_name_or_path
  1957. )
  1958. if "custom_pipelines" in init_kwargs:
  1959. init_kwargs["custom_pipelines"] = add_model_info_to_custom_pipelines(
  1960. init_kwargs["custom_pipelines"], pretrained_model_name_or_path
  1961. )
  1962. if config_tokenizer_class is None:
  1963. # Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo.
  1964. # If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with
  1965. # AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain.
  1966. # Maybe we can just remove this entirely?
  1967. from .models.auto.configuration_auto import AutoConfig # tests_ignore
  1968. # Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
  1969. try:
  1970. config = AutoConfig.from_pretrained(
  1971. pretrained_model_name_or_path,
  1972. token=token,
  1973. cache_dir=cache_dir,
  1974. local_files_only=local_files_only,
  1975. trust_remote_code=trust_remote_code,
  1976. _commit_hash=_commit_hash,
  1977. )
  1978. config_tokenizer_class = config.tokenizer_class
  1979. except (OSError, ValueError, KeyError):
  1980. # skip if an error occurred.
  1981. config = None
  1982. if config_tokenizer_class is None:
1983. # Third attempt. If we have not yet found the original type of the tokenizer we are loading,
1984. # we see if we can infer it from the type of the configuration file.
  1985. from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore
  1986. if hasattr(config, "model_type"):
  1987. model_type = config.model_type
  1988. else:
  1989. # Fallback: use pattern matching on the string.
  1990. model_type = None
  1991. for pattern in TOKENIZER_MAPPING_NAMES.keys():
  1992. if pattern in str(pretrained_model_name_or_path):
  1993. model_type = pattern
  1994. break
  1995. if model_type is not None:
  1996. config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get(
  1997. model_type, (None, None)
  1998. )
  1999. if config_tokenizer_class is None:
  2000. config_tokenizer_class = config_tokenizer_class_fast
  2001. if config_tokenizer_class is not None:
  2002. if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
  2003. logger.warning(
  2004. "The tokenizer class you load from this checkpoint is not the same type as the class this"
  2005. " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you"
  2006. f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called"
  2007. f" from is '{cls.__name__}'."
  2008. )
  2009. # Update with newly provided kwargs
  2010. init_kwargs.update(kwargs)
  2011. # Merge resolved_vocab_files arguments in init_kwargs.
  2012. added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
  2013. special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
  2014. for args_name, file_path in resolved_vocab_files.items():
  2015. if args_name not in init_kwargs:
  2016. init_kwargs[args_name] = file_path
  2017. tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None)
  2018. if slow_tokenizer is not None:
  2019. init_kwargs["__slow_tokenizer"] = slow_tokenizer
  2020. init_kwargs["name_or_path"] = pretrained_model_name_or_path
  2021. #### Handle tokenizer serialization of added and special tokens
  2022. added_tokens_decoder: Dict[int, AddedToken] = {}
  2023. added_tokens_map: Dict[str, AddedToken] = {}
  2024. # if we have info on the slow added tokens
  2025. if "added_tokens_decoder" in init_kwargs:
  2026. for idx, token in init_kwargs["added_tokens_decoder"].items():
  2027. if isinstance(token, dict):
  2028. token = AddedToken(**token)
  2029. if isinstance(token, AddedToken):
  2030. added_tokens_decoder[int(idx)] = token
  2031. added_tokens_map[str(token)] = token
  2032. else:
  2033. raise ValueError(
  2034. f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance"
  2035. )
  2036. else:
  2037. # begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified
  2038. if special_tokens_map_file is not None:
  2039. with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
  2040. special_tokens_map = json.load(special_tokens_map_handle)
  2041. for key, value in special_tokens_map.items():
  2042. if key in kwargs and kwargs[key]:
  2043. # This value has already been redefined by the kwargs
  2044. # We keep this new value and ignore the one stored in the special_tokens_map_file
  2045. continue
  2046. if isinstance(value, dict):
  2047. value["special"] = True
  2048. value = AddedToken(**value)
  2049. elif key == "additional_special_tokens" and isinstance(value, list):
  2050. additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or []
  2051. for token in value:
  2052. if isinstance(token, dict):
  2053. token["special"] = True
  2054. token = AddedToken(**token)
  2055. if token not in additional_special_tokens:
  2056. additional_special_tokens.append(token)
  2057. value = additional_special_tokens
  2058. init_kwargs[key] = value
  2059. # slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`.
  2060. # this is for legacy purpose. We don't add the tokens after init for efficiency.
  2061. if added_tokens_file is not None:
  2062. special_tokens = []
  2063. for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
  2064. if init_kwargs[key] is not None:
  2065. if key == "additional_special_tokens":
  2066. special_tokens += [str(token) for token in init_kwargs[key]]
  2067. else:
  2068. special_tokens.append(str(init_kwargs[key]))
  2069. with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
  2070. added_tok_encoder = json.load(added_tokens_handle)
  2071. for str_token, index in added_tok_encoder.items():
  2072. # if index not in added_tokens_decoder and str_token not in added_tokens_map:
  2073. special = str_token in special_tokens
  2074. added_tokens_decoder[index] = AddedToken(
  2075. str_token, rstrip=False, lstrip=False, normalized=not special, special=special
  2076. )
2077. added_tokens_map[str_token] = added_tokens_decoder[index]
  2078. # allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer
  2079. # if `tokenizer_config.json` is `None`
  2080. if tokenizer_file is not None:
2081. # This is for slow tokenizers, so it can be done before
  2082. with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle:
  2083. tokenizer_file_handle = json.load(tokenizer_file_handle)
  2084. added_tokens = tokenizer_file_handle.pop("added_tokens")
  2085. for serialized_tokens in added_tokens:
  2086. idx = serialized_tokens.pop("id")
  2087. added_tokens_decoder[idx] = AddedToken(**serialized_tokens)
  2088. added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx]
  2089. # end legacy
  2090. # Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken
  2091. # convert {'__type': 'AddedToken', 'content': '<ent>', 'lstrip': False, 'normalized': True, ...} to AddedTokens
  2092. init_kwargs["added_tokens_decoder"] = added_tokens_decoder
  2093. init_kwargs = cls.convert_added_tokens(init_kwargs, save=False)
  2094. for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
  2095. if added_tokens_map != {} and init_kwargs[key] is not None:
  2096. if key != "additional_special_tokens":
  2097. init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key])
  2098. # Instantiate the tokenizer.
  2099. try:
  2100. tokenizer = cls(*init_inputs, **init_kwargs)
  2101. except import_protobuf_decode_error():
  2102. logger.info(
  2103. "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
  2104. "(Google protobuf error: Tried to load SPM model with non-SPM vocab file).",
  2105. )
  2106. return False
  2107. except RuntimeError as e:
  2108. if "sentencepiece_processor.cc" in str(e):
  2109. logger.info(
  2110. "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
  2111. "(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).",
  2112. )
  2113. return False
  2114. except OSError:
  2115. raise OSError(
  2116. "Unable to load vocabulary from file. "
  2117. "Please check that the provided vocabulary is accessible and not corrupted."
  2118. )
  2126. if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size:
  2127. logger.info(
  2128. "Special tokens have been added in the vocabulary, make sure the associated word embeddings are"
  2129. " fine-tuned or trained."
  2130. )
  2131. return tokenizer
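# A minimal sketch of the legacy files consumed in the "begin legacy"/"end legacy" branch
# above, with made-up token strings and indices: special_tokens_map.json maps special-token
# attributes to strings (or serialized AddedToken dicts), and added_tokens.json maps added
# token strings to vocabulary indices; both end up as AddedToken entries in `added_tokens_decoder`.
from transformers import AddedToken

special_tokens_map_example = {"unk_token": "[UNK]", "pad_token": {"content": "[PAD]", "lstrip": False, "rstrip": False}}
added_tok_encoder_example = {"[NEW_TOK]": 30522, "[OTHER_TOK]": 30523}
special_tokens_example = ["[NEW_TOK]"]

added_tokens_decoder_example = {}
for content, index in added_tok_encoder_example.items():
    # Mirror the conversion loop above: specials are not normalized.
    special = content in special_tokens_example
    added_tokens_decoder_example[index] = AddedToken(
        content, rstrip=False, lstrip=False, normalized=not special, special=special
    )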
  2132. @staticmethod
  2133. def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
  2134. # This method should be deleted in Transformers v5
  2135. # Its only purpose is to potentially throw a warning
  2136. # that incorrectly defined max lengths of T5's tokenizer are used
  2137. # which we will correct in Transformers v5.
  2138. return max_model_length
  2139. @classmethod
  2140. def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True):
  2141. if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
  2142. obj.pop("__type")
  2143. return AddedToken(**obj)
  2144. if isinstance(obj, AddedToken) and save:
  2145. obj = obj.__getstate__()
  2146. if add_type_field:
  2147. obj["__type"] = "AddedToken"
  2148. else:
  2149. # Don't save "special" for previous tokenizers
  2150. obj.pop("special")
  2151. return obj
  2152. elif isinstance(obj, (list, tuple)):
  2153. return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj]
  2154. elif isinstance(obj, dict):
  2155. return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()}
  2156. return obj
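# A small sketch of the round trip performed by `convert_added_tokens`: serialized dicts
# tagged with `"__type": "AddedToken"` are rebuilt into AddedToken objects on load, and
# AddedToken objects are turned back into (optionally type-tagged) dicts on save. The token
# content "<ent>" is a made-up example.
from transformers import AddedToken
from transformers.tokenization_utils_base import PreTrainedTokenizerBase

serialized = {"__type": "AddedToken", "content": "<ent>", "lstrip": False, "rstrip": False, "normalized": True, "special": False}
loaded = PreTrainedTokenizerBase.convert_added_tokens(serialized, save=False)
assert isinstance(loaded, AddedToken)

saved = PreTrainedTokenizerBase.convert_added_tokens(loaded, save=True, add_type_field=True)
# The remaining keys mirror the AddedToken fields (content, lstrip, rstrip, ...).
assert saved["__type"] == "AddedToken"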
  2157. def save_pretrained(
  2158. self,
  2159. save_directory: Union[str, os.PathLike],
  2160. legacy_format: Optional[bool] = None,
  2161. filename_prefix: Optional[str] = None,
  2162. push_to_hub: bool = False,
  2163. **kwargs,
  2164. ) -> Tuple[str]:
  2165. """
  2166. Save the full tokenizer state.
2167. This method makes sure the full tokenizer can then be re-loaded using the
2168. [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
2169. Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (for
  2170. instance, modifying `tokenizer.do_lower_case` after creation).
  2171. Args:
  2172. save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
  2173. legacy_format (`bool`, *optional*):
  2174. Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
  2175. format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate
2176. added_tokens file.
  2177. If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
  2178. "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
  2179. loaded in the corresponding "slow" tokenizer.
2180. If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a value
  2181. error is raised.
  2182. filename_prefix (`str`, *optional*):
  2183. A prefix to add to the names of the files saved by the tokenizer.
  2184. push_to_hub (`bool`, *optional*, defaults to `False`):
  2185. Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
  2186. repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
  2187. namespace).
  2188. kwargs (`Dict[str, Any]`, *optional*):
  2189. Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
  2190. Returns:
  2191. A tuple of `str`: The files saved.
  2192. """
  2193. use_auth_token = kwargs.pop("use_auth_token", None)
  2194. if use_auth_token is not None:
  2195. warnings.warn(
  2196. "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
  2197. FutureWarning,
  2198. )
  2199. if kwargs.get("token", None) is not None:
  2200. raise ValueError(
  2201. "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
  2202. )
  2203. kwargs["token"] = use_auth_token
  2204. if os.path.isfile(save_directory):
  2205. logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
  2206. return
  2207. os.makedirs(save_directory, exist_ok=True)
  2208. if push_to_hub:
  2209. commit_message = kwargs.pop("commit_message", None)
  2210. repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
  2211. repo_id = self._create_repo(repo_id, **kwargs)
  2212. files_timestamps = self._get_files_timestamps(save_directory)
  2213. special_tokens_map_file = os.path.join(
  2214. save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
  2215. )
  2216. tokenizer_config_file = os.path.join(
  2217. save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
  2218. )
  2219. tokenizer_config = copy.deepcopy(self.init_kwargs)
  2220. # Let's save the init kwargs
  2221. target_keys = set(self.init_kwargs.keys())
  2222. # Let's save the special tokens map (only the strings)
  2223. target_keys.update(["model_max_length", "clean_up_tokenization_spaces"])
  2224. for k in target_keys:
  2225. if hasattr(self, k):
  2226. tokenizer_config[k] = getattr(self, k)
  2227. # Let's make sure we properly save the special tokens.
  2228. tokenizer_config.update(self.special_tokens_map)
  2229. if self.chat_template is not None:
  2230. if isinstance(self.chat_template, dict):
  2231. # Chat template dicts are saved to the config as lists of dicts with fixed key names.
  2232. # They will be reconstructed as a single dict during loading.
  2233. tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()]
  2234. else:
  2235. tokenizer_config["chat_template"] = self.chat_template
  2236. if len(self.init_inputs) > 0:
  2237. tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
  2238. for file_id in self.vocab_files_names.keys():
  2239. tokenizer_config.pop(file_id, None)
2240. # No type fields, so that both old fast and slow tokenizers can load it
  2241. tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True)
2242. # Process added tokens separately: allows previous versions to ignore it!
  2243. added_tokens = {}
  2244. for key, value in self.added_tokens_decoder.items():
  2245. added_tokens[key] = value.__getstate__()
  2246. tokenizer_config["added_tokens_decoder"] = added_tokens
  2247. # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
  2248. tokenizer_class = self.__class__.__name__
  2249. # Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
  2250. if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
  2251. tokenizer_class = tokenizer_class[:-4]
  2252. tokenizer_config["tokenizer_class"] = tokenizer_class
  2253. if getattr(self, "_auto_map", None) is not None:
  2254. tokenizer_config["auto_map"] = self._auto_map
  2255. if getattr(self, "_processor_class", None) is not None:
  2256. tokenizer_config["processor_class"] = self._processor_class
  2257. # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
  2258. # loaded from the Hub.
  2259. if self._auto_class is not None:
  2260. custom_object_save(self, save_directory, config=tokenizer_config)
  2261. # remove private information
  2262. if "name_or_path" in tokenizer_config:
  2263. tokenizer_config.pop("name_or_path")
  2264. tokenizer_config.pop("special_tokens_map_file", None)
  2265. tokenizer_config.pop("tokenizer_file", None)
  2266. if "device_map" in tokenizer_config:
  2267. tokenizer_config.pop("device_map")
  2268. with open(tokenizer_config_file, "w", encoding="utf-8") as f:
  2269. out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
  2270. f.write(out_str)
  2271. logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
  2272. # Sanitize AddedTokens in special_tokens_map
2273. # Kept for forward compatibility; will be removed in Transformers v5. Type fields are not saved for forward compatibility, and `special` should not be saved either.
  2274. write_dict = self.convert_added_tokens(self.special_tokens_map_extended, save=True, add_type_field=False)
  2275. with open(special_tokens_map_file, "w", encoding="utf-8") as f:
  2276. out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
  2277. f.write(out_str)
  2278. logger.info(f"Special tokens file saved in {special_tokens_map_file}")
  2279. file_names = (tokenizer_config_file, special_tokens_map_file)
  2280. save_files = self._save_pretrained(
  2281. save_directory=save_directory,
  2282. file_names=file_names,
  2283. legacy_format=legacy_format,
  2284. filename_prefix=filename_prefix,
  2285. )
  2286. if push_to_hub:
  2287. self._upload_modified_files(
  2288. save_directory,
  2289. repo_id,
  2290. files_timestamps,
  2291. commit_message=commit_message,
  2292. token=kwargs.get("token"),
  2293. )
  2294. return save_files
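# A short usage sketch for the save/reload round trip described in the docstring above;
# the checkpoint is real, the local paths are placeholders.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
saved_files = tok.save_pretrained("./my_tokenizer")  # tokenizer_config.json, special_tokens_map.json, vocab files, ...
reloaded = AutoTokenizer.from_pretrained("./my_tokenizer")
assert reloaded.get_vocab() == tok.get_vocab()
# Fast tokenizers only: skip the legacy vocab/added_tokens files and keep just tokenizer.json
# (plus the config files written above).
tok.save_pretrained("./my_tokenizer_fast_only", legacy_format=False)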
  2295. def _save_pretrained(
  2296. self,
  2297. save_directory: Union[str, os.PathLike],
  2298. file_names: Tuple[str],
  2299. legacy_format: Optional[bool] = None,
  2300. filename_prefix: Optional[str] = None,
  2301. ) -> Tuple[str]:
  2302. """
  2303. Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
2304. Fast tokenizers can also be saved in a single JSON file containing {config + vocab + added-tokens} using the
  2305. specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`]
  2306. """
  2307. if legacy_format is False:
  2308. raise ValueError(
  2309. "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
  2310. )
  2311. save_directory = str(save_directory)
  2312. added_tokens_file = os.path.join(
  2313. save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
  2314. )
  2315. # the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size
  2316. added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size}
  2317. if added_vocab:
  2318. with open(added_tokens_file, "w", encoding="utf-8") as f:
  2319. out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
  2320. f.write(out_str)
  2321. logger.info(f"added tokens file saved in {added_tokens_file}")
  2322. vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
  2323. return file_names + vocab_files + (added_tokens_file,)
  2324. def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
  2325. """
  2326. Save only the vocabulary of the tokenizer (vocabulary + added tokens).
  2327. This method won't save the configuration and special token mappings of the tokenizer. Use
  2328. [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.
  2329. Args:
  2330. save_directory (`str`):
  2331. The directory in which to save the vocabulary.
  2332. filename_prefix (`str`, *optional*):
2333. An optional prefix to add to the names of the saved files.
  2334. Returns:
  2335. `Tuple(str)`: Paths to the files saved.
  2336. """
  2337. raise NotImplementedError
  2338. def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
  2339. """
  2340. Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.
  2341. Args:
  2342. text (`str`):
  2343. The sequence to be encoded.
  2344. pair (`str`, *optional*):
  2345. A second sequence to be encoded with the first.
  2346. add_special_tokens (`bool`, *optional*, defaults to `False`):
  2347. Whether or not to add the special tokens associated with the corresponding model.
  2348. kwargs (additional keyword arguments, *optional*):
  2349. Will be passed to the underlying model specific encode method. See details in
  2350. [`~PreTrainedTokenizerBase.__call__`]
  2351. Returns:
  2352. `List[str]`: The list of tokens.
  2353. """
  2354. raise NotImplementedError
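# Usage sketch for a concrete subclass (the base class leaves `tokenize` abstract); the
# exact tokens depend on the vocabulary, so the outputs shown in comments are indicative only.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
print(tok.tokenize("Hello, world!"))                    # e.g. ['hello', ',', 'world', '!']
print(tok.tokenize("Hello", add_special_tokens=True))   # may include [CLS]/[SEP], depending on the tokenizer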
  2355. @add_end_docstrings(
  2356. ENCODE_KWARGS_DOCSTRING,
  2357. """
  2358. **kwargs: Passed along to the `.tokenize()` method.
  2359. """,
  2360. """
  2361. Returns:
  2362. `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
  2363. """,
  2364. )
  2365. def encode(
  2366. self,
  2367. text: Union[TextInput, PreTokenizedInput, EncodedInput],
  2368. text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
  2369. add_special_tokens: bool = True,
  2370. padding: Union[bool, str, PaddingStrategy] = False,
  2371. truncation: Union[bool, str, TruncationStrategy] = None,
  2372. max_length: Optional[int] = None,
  2373. stride: int = 0,
2374. padding_side: Optional[str] = None,
  2375. return_tensors: Optional[Union[str, TensorType]] = None,
  2376. **kwargs,
  2377. ) -> List[int]:
  2378. """
  2379. Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.
  2380. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
  2381. Args:
  2382. text (`str`, `List[str]` or `List[int]`):
  2383. The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
  2384. `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
  2385. method).
  2386. text_pair (`str`, `List[str]` or `List[int]`, *optional*):
  2387. Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
  2388. the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
  2389. method).
  2390. """
  2391. encoded_inputs = self.encode_plus(
  2392. text,
  2393. text_pair=text_pair,
  2394. add_special_tokens=add_special_tokens,
  2395. padding=padding,
  2396. truncation=truncation,
  2397. max_length=max_length,
  2398. stride=stride,
  2399. padding_side=padding_side,
  2400. return_tensors=return_tensors,
  2401. **kwargs,
  2402. )
  2403. return encoded_inputs["input_ids"]
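# A sketch relating `encode` to `tokenize` + `convert_tokens_to_ids`, as stated in the
# docstring above; the checkpoint is a placeholder and the ids are model-specific.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
ids = tok.encode("Hello, world!")  # includes special tokens by default
no_special = tok.encode("Hello, world!", add_special_tokens=False)
assert no_special == tok.convert_tokens_to_ids(tok.tokenize("Hello, world!"))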
  2404. def num_special_tokens_to_add(self, pair: bool = False) -> int:
  2405. raise NotImplementedError
  2406. def _get_padding_truncation_strategies(
  2407. self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
  2408. ):
  2409. """
  2410. Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
  2411. and pad_to_max_length) and behaviors.
  2412. """
  2413. old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
  2414. old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
  2415. # Backward compatibility for previous behavior, maybe we should deprecate it:
  2416. # If you only set max_length, it activates truncation for max_length
  2417. if max_length is not None and padding is False and truncation is None:
  2418. if verbose:
  2419. if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False):
  2420. logger.warning(
2421. "Truncation was not explicitly activated but `max_length` was provided with a specific value, please"
  2422. " use `truncation=True` to explicitly truncate examples to max length. Defaulting to"
  2423. " 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the"
  2424. " tokenizer you can select this strategy more precisely by providing a specific strategy to"
  2425. " `truncation`."
  2426. )
  2427. self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
  2428. truncation = "longest_first"
  2429. # Get padding strategy
  2430. if padding is False and old_pad_to_max_length:
  2431. if verbose:
  2432. warnings.warn(
  2433. "The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
  2434. "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
  2435. "use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
  2436. "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
  2437. "maximal input size of the model (e.g. 512 for Bert).",
  2438. FutureWarning,
  2439. )
  2440. if max_length is None:
  2441. padding_strategy = PaddingStrategy.LONGEST
  2442. else:
  2443. padding_strategy = PaddingStrategy.MAX_LENGTH
  2444. elif padding is not False:
  2445. if padding is True:
  2446. if verbose:
  2447. if max_length is not None and (
  2448. truncation is None or truncation is False or truncation == "do_not_truncate"
  2449. ):
  2450. warnings.warn(
  2451. "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
  2452. "To pad to max length, use `padding='max_length'`."
  2453. )
  2454. if old_pad_to_max_length is not False:
  2455. warnings.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
  2456. padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
  2457. elif not isinstance(padding, PaddingStrategy):
  2458. padding_strategy = PaddingStrategy(padding)
  2459. elif isinstance(padding, PaddingStrategy):
  2460. padding_strategy = padding
  2461. else:
  2462. padding_strategy = PaddingStrategy.DO_NOT_PAD
  2463. # Get truncation strategy
  2464. if truncation is None and old_truncation_strategy != "do_not_truncate":
  2465. if verbose:
  2466. warnings.warn(
  2467. "The `truncation_strategy` argument is deprecated and will be removed in a future version, use"
  2468. " `truncation=True` to truncate examples to a max length. You can give a specific length with"
  2469. " `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input"
  2470. " size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific"
  2471. " truncation strategy selected among `truncation='only_first'` (will only truncate the first"
  2472. " sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the"
  2473. " pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence"
  2474. " in the pairs).",
  2475. FutureWarning,
  2476. )
  2477. truncation_strategy = TruncationStrategy(old_truncation_strategy)
  2478. elif truncation is not False and truncation is not None:
  2479. if truncation is True:
  2480. truncation_strategy = (
  2481. TruncationStrategy.LONGEST_FIRST
  2482. ) # Default to truncate the longest sequences in pairs of inputs
  2483. elif not isinstance(truncation, TruncationStrategy):
  2484. truncation_strategy = TruncationStrategy(truncation)
  2485. elif isinstance(truncation, TruncationStrategy):
  2486. truncation_strategy = truncation
  2487. else:
  2488. truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
  2489. # Set max length if needed
  2490. if max_length is None:
  2491. if padding_strategy == PaddingStrategy.MAX_LENGTH:
  2492. if self.model_max_length > LARGE_INTEGER:
  2493. if verbose:
  2494. if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False):
  2495. logger.warning(
  2496. "Asking to pad to max_length but no maximum length is provided and the model has no"
  2497. " predefined maximum length. Default to no padding."
  2498. )
  2499. self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
  2500. padding_strategy = PaddingStrategy.DO_NOT_PAD
  2501. else:
  2502. max_length = self.model_max_length
  2503. if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
  2504. if self.model_max_length > LARGE_INTEGER:
  2505. if verbose:
  2506. if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False):
  2507. logger.warning(
  2508. "Asking to truncate to max_length but no maximum length is provided and the model has"
  2509. " no predefined maximum length. Default to no truncation."
  2510. )
  2511. self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True
  2512. truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
  2513. else:
  2514. max_length = self.model_max_length
  2515. # Test if we have a padding token
  2516. if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0):
  2517. raise ValueError(
  2518. "Asking to pad but the tokenizer does not have a padding token. "
  2519. "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
  2520. "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
  2521. )
  2522. # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
  2523. if (
  2524. truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
  2525. and padding_strategy != PaddingStrategy.DO_NOT_PAD
  2526. and pad_to_multiple_of is not None
  2527. and max_length is not None
  2528. and (max_length % pad_to_multiple_of != 0)
  2529. ):
  2530. raise ValueError(
  2531. "Truncation and padding are both activated but "
  2532. f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
  2533. )
  2534. return padding_strategy, truncation_strategy, max_length, kwargs
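# A minimal sketch of how user-facing arguments resolve to strategies in this helper
# (a private method, called here only for illustration); the checkpoint is a placeholder.
from transformers import AutoTokenizer
from transformers.utils import PaddingStrategy
from transformers.tokenization_utils_base import TruncationStrategy

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
pad_strat, trunc_strat, max_len, _ = tok._get_padding_truncation_strategies(
    padding="max_length", truncation=True, max_length=None
)
# padding="max_length" with no max_length falls back to the model maximum (512 for BERT),
# and truncation=True resolves to 'longest_first'.
assert pad_strat == PaddingStrategy.MAX_LENGTH and trunc_strat == TruncationStrategy.LONGEST_FIRST
assert max_len == tok.model_max_length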
  2535. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  2536. def __call__(
  2537. self,
  2538. text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
  2539. text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
  2540. text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
  2541. text_pair_target: Optional[
  2542. Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
  2543. ] = None,
  2544. add_special_tokens: bool = True,
  2545. padding: Union[bool, str, PaddingStrategy] = False,
  2546. truncation: Union[bool, str, TruncationStrategy] = None,
  2547. max_length: Optional[int] = None,
  2548. stride: int = 0,
  2549. is_split_into_words: bool = False,
  2550. pad_to_multiple_of: Optional[int] = None,
2551. padding_side: Optional[str] = None,
  2552. return_tensors: Optional[Union[str, TensorType]] = None,
  2553. return_token_type_ids: Optional[bool] = None,
  2554. return_attention_mask: Optional[bool] = None,
  2555. return_overflowing_tokens: bool = False,
  2556. return_special_tokens_mask: bool = False,
  2557. return_offsets_mapping: bool = False,
  2558. return_length: bool = False,
  2559. verbose: bool = True,
  2560. **kwargs,
  2561. ) -> BatchEncoding:
  2562. """
  2563. Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
  2564. sequences.
  2565. Args:
  2566. text (`str`, `List[str]`, `List[List[str]]`, *optional*):
  2567. The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
  2568. (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
  2569. `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
  2570. text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
  2571. The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
  2572. (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
  2573. `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
  2574. text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
  2575. The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
  2576. list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
  2577. you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
  2578. text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
  2579. The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
  2580. list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
  2581. you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
  2582. """
  2583. # To avoid duplicating
  2584. all_kwargs = {
  2585. "add_special_tokens": add_special_tokens,
  2586. "padding": padding,
  2587. "truncation": truncation,
  2588. "max_length": max_length,
  2589. "stride": stride,
  2590. "is_split_into_words": is_split_into_words,
  2591. "pad_to_multiple_of": pad_to_multiple_of,
  2592. "padding_side": padding_side,
  2593. "return_tensors": return_tensors,
  2594. "return_token_type_ids": return_token_type_ids,
  2595. "return_attention_mask": return_attention_mask,
  2596. "return_overflowing_tokens": return_overflowing_tokens,
  2597. "return_special_tokens_mask": return_special_tokens_mask,
  2598. "return_offsets_mapping": return_offsets_mapping,
  2599. "return_length": return_length,
  2600. "split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens),
  2601. "verbose": verbose,
  2602. }
  2603. all_kwargs.update(kwargs)
  2604. if text is None and text_target is None:
  2605. raise ValueError("You need to specify either `text` or `text_target`.")
  2606. if text is not None:
  2607. # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the
  2608. # input mode in this case.
  2609. if not self._in_target_context_manager:
  2610. self._switch_to_input_mode()
  2611. encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs)
  2612. if text_target is not None:
  2613. self._switch_to_target_mode()
  2614. target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs)
  2615. # Leave back tokenizer in input mode
  2616. self._switch_to_input_mode()
  2617. if text_target is None:
  2618. return encodings
  2619. elif text is None:
  2620. return target_encodings
  2621. else:
  2622. encodings["labels"] = target_encodings["input_ids"]
  2623. return encodings
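# Usage sketch for the main entry point above: encoding a source/target pair for a
# sequence-to-sequence model. The checkpoint is a placeholder (this one needs the
# sentencepiece backend), and torch is assumed for `return_tensors="pt"`.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-t5/t5-small")
batch = tok(
    text="translate English to German: Hello",  # encoded in input mode
    text_target="Hallo",                         # encoded in target mode
    padding=True,
    return_tensors="pt",
)
# The target ids are attached as `labels`, exactly as done at the end of __call__.
print(batch["input_ids"].shape, batch["labels"].shape)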
  2624. def _call_one(
  2625. self,
  2626. text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
  2627. text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
  2628. add_special_tokens: bool = True,
  2629. padding: Union[bool, str, PaddingStrategy] = False,
  2630. truncation: Union[bool, str, TruncationStrategy] = None,
  2631. max_length: Optional[int] = None,
  2632. stride: int = 0,
  2633. is_split_into_words: bool = False,
  2634. pad_to_multiple_of: Optional[int] = None,
2635. padding_side: Optional[str] = None,
  2636. return_tensors: Optional[Union[str, TensorType]] = None,
  2637. return_token_type_ids: Optional[bool] = None,
  2638. return_attention_mask: Optional[bool] = None,
  2639. return_overflowing_tokens: bool = False,
  2640. return_special_tokens_mask: bool = False,
  2641. return_offsets_mapping: bool = False,
  2642. return_length: bool = False,
  2643. verbose: bool = True,
  2644. split_special_tokens: bool = False,
  2645. **kwargs,
  2646. ) -> BatchEncoding:
  2647. # Input type checking for clearer error
  2648. def _is_valid_text_input(t):
  2649. if isinstance(t, str):
  2650. # Strings are fine
  2651. return True
  2652. elif isinstance(t, (list, tuple)):
  2653. # List are fine as long as they are...
  2654. if len(t) == 0:
  2655. # ... empty
  2656. return True
  2657. elif isinstance(t[0], str):
  2658. # ... list of strings
  2659. return True
  2660. elif isinstance(t[0], (list, tuple)):
  2661. # ... list with an empty list or with a list of strings
  2662. return len(t[0]) == 0 or isinstance(t[0][0], str)
  2663. else:
  2664. return False
  2665. else:
  2666. return False
  2667. if not _is_valid_text_input(text):
  2668. raise ValueError(
  2669. "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) "
  2670. "or `List[List[str]]` (batch of pretokenized examples)."
  2671. )
  2672. if text_pair is not None and not _is_valid_text_input(text_pair):
  2673. raise ValueError(
  2674. "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) "
  2675. "or `List[List[str]]` (batch of pretokenized examples)."
  2676. )
  2677. if is_split_into_words:
  2678. is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
  2679. else:
  2680. is_batched = isinstance(text, (list, tuple))
  2681. if is_batched:
  2682. if isinstance(text_pair, str):
  2683. raise TypeError(
  2684. "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as"
  2685. " `text`."
  2686. )
  2687. if text_pair is not None and len(text) != len(text_pair):
  2688. raise ValueError(
  2689. f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
  2690. f" {len(text_pair)}."
  2691. )
  2692. batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
  2693. return self.batch_encode_plus(
  2694. batch_text_or_text_pairs=batch_text_or_text_pairs,
  2695. add_special_tokens=add_special_tokens,
  2696. padding=padding,
  2697. truncation=truncation,
  2698. max_length=max_length,
  2699. stride=stride,
  2700. is_split_into_words=is_split_into_words,
  2701. pad_to_multiple_of=pad_to_multiple_of,
  2702. padding_side=padding_side,
  2703. return_tensors=return_tensors,
  2704. return_token_type_ids=return_token_type_ids,
  2705. return_attention_mask=return_attention_mask,
  2706. return_overflowing_tokens=return_overflowing_tokens,
  2707. return_special_tokens_mask=return_special_tokens_mask,
  2708. return_offsets_mapping=return_offsets_mapping,
  2709. return_length=return_length,
  2710. verbose=verbose,
  2711. split_special_tokens=split_special_tokens,
  2712. **kwargs,
  2713. )
  2714. else:
  2715. return self.encode_plus(
  2716. text=text,
  2717. text_pair=text_pair,
  2718. add_special_tokens=add_special_tokens,
  2719. padding=padding,
  2720. truncation=truncation,
  2721. max_length=max_length,
  2722. stride=stride,
  2723. is_split_into_words=is_split_into_words,
  2724. pad_to_multiple_of=pad_to_multiple_of,
  2725. padding_side=padding_side,
  2726. return_tensors=return_tensors,
  2727. return_token_type_ids=return_token_type_ids,
  2728. return_attention_mask=return_attention_mask,
  2729. return_overflowing_tokens=return_overflowing_tokens,
  2730. return_special_tokens_mask=return_special_tokens_mask,
  2731. return_offsets_mapping=return_offsets_mapping,
  2732. return_length=return_length,
  2733. verbose=verbose,
  2734. split_special_tokens=split_special_tokens,
  2735. **kwargs,
  2736. )
  2737. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  2738. def encode_plus(
  2739. self,
  2740. text: Union[TextInput, PreTokenizedInput, EncodedInput],
  2741. text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
  2742. add_special_tokens: bool = True,
  2743. padding: Union[bool, str, PaddingStrategy] = False,
  2744. truncation: Union[bool, str, TruncationStrategy] = None,
  2745. max_length: Optional[int] = None,
  2746. stride: int = 0,
  2747. is_split_into_words: bool = False,
  2748. pad_to_multiple_of: Optional[int] = None,
2749. padding_side: Optional[str] = None,
  2750. return_tensors: Optional[Union[str, TensorType]] = None,
  2751. return_token_type_ids: Optional[bool] = None,
  2752. return_attention_mask: Optional[bool] = None,
  2753. return_overflowing_tokens: bool = False,
  2754. return_special_tokens_mask: bool = False,
  2755. return_offsets_mapping: bool = False,
  2756. return_length: bool = False,
  2757. verbose: bool = True,
  2758. **kwargs,
  2759. ) -> BatchEncoding:
  2760. """
  2761. Tokenize and prepare for the model a sequence or a pair of sequences.
  2762. <Tip warning={true}>
  2763. This method is deprecated, `__call__` should be used instead.
  2764. </Tip>
  2765. Args:
  2766. text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`):
  2767. The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
  2768. `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
  2769. method).
  2770. text_pair (`str`, `List[str]` or `List[int]`, *optional*):
  2771. Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
  2772. the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
  2773. method).
  2774. """
  2775. # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
  2776. padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
  2777. padding=padding,
  2778. truncation=truncation,
  2779. max_length=max_length,
  2780. pad_to_multiple_of=pad_to_multiple_of,
  2781. verbose=verbose,
  2782. **kwargs,
  2783. )
  2784. return self._encode_plus(
  2785. text=text,
  2786. text_pair=text_pair,
  2787. add_special_tokens=add_special_tokens,
  2788. padding_strategy=padding_strategy,
  2789. truncation_strategy=truncation_strategy,
  2790. max_length=max_length,
  2791. stride=stride,
  2792. is_split_into_words=is_split_into_words,
  2793. pad_to_multiple_of=pad_to_multiple_of,
  2794. padding_side=padding_side,
  2795. return_tensors=return_tensors,
  2796. return_token_type_ids=return_token_type_ids,
  2797. return_attention_mask=return_attention_mask,
  2798. return_overflowing_tokens=return_overflowing_tokens,
  2799. return_special_tokens_mask=return_special_tokens_mask,
  2800. return_offsets_mapping=return_offsets_mapping,
  2801. return_length=return_length,
  2802. verbose=verbose,
  2803. split_special_tokens=kwargs.pop("split_special_tokens", self.split_special_tokens),
  2804. **kwargs,
  2805. )
  2806. def _encode_plus(
  2807. self,
  2808. text: Union[TextInput, PreTokenizedInput, EncodedInput],
  2809. text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
  2810. add_special_tokens: bool = True,
  2811. padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
  2812. truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
  2813. max_length: Optional[int] = None,
  2814. stride: int = 0,
  2815. is_split_into_words: bool = False,
  2816. pad_to_multiple_of: Optional[int] = None,
2817. padding_side: Optional[str] = None,
  2818. return_tensors: Optional[Union[str, TensorType]] = None,
  2819. return_token_type_ids: Optional[bool] = None,
  2820. return_attention_mask: Optional[bool] = None,
  2821. return_overflowing_tokens: bool = False,
  2822. return_special_tokens_mask: bool = False,
  2823. return_offsets_mapping: bool = False,
  2824. return_length: bool = False,
  2825. verbose: bool = True,
  2826. split_special_tokens: bool = False,
  2827. **kwargs,
  2828. ) -> BatchEncoding:
  2829. raise NotImplementedError
  2830. @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
  2831. def batch_encode_plus(
  2832. self,
  2833. batch_text_or_text_pairs: Union[
  2834. List[TextInput],
  2835. List[TextInputPair],
  2836. List[PreTokenizedInput],
  2837. List[PreTokenizedInputPair],
  2838. List[EncodedInput],
  2839. List[EncodedInputPair],
  2840. ],
  2841. add_special_tokens: bool = True,
  2842. padding: Union[bool, str, PaddingStrategy] = False,
  2843. truncation: Union[bool, str, TruncationStrategy] = None,
  2844. max_length: Optional[int] = None,
  2845. stride: int = 0,
  2846. is_split_into_words: bool = False,
  2847. pad_to_multiple_of: Optional[int] = None,
2848. padding_side: Optional[str] = None,
  2849. return_tensors: Optional[Union[str, TensorType]] = None,
  2850. return_token_type_ids: Optional[bool] = None,
  2851. return_attention_mask: Optional[bool] = None,
  2852. return_overflowing_tokens: bool = False,
  2853. return_special_tokens_mask: bool = False,
  2854. return_offsets_mapping: bool = False,
  2855. return_length: bool = False,
  2856. verbose: bool = True,
  2857. split_special_tokens: bool = False,
  2858. **kwargs,
  2859. ) -> BatchEncoding:
  2860. """
  2861. Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
  2862. <Tip warning={true}>
  2863. This method is deprecated, `__call__` should be used instead.
  2864. </Tip>
  2865. Args:
  2866. batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
  2867. Batch of sequences or pair of sequences to be encoded. This can be a list of
  2868. string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
  2869. details in `encode_plus`).
  2870. """
  2871. # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
  2872. padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
  2873. padding=padding,
  2874. truncation=truncation,
  2875. max_length=max_length,
  2876. pad_to_multiple_of=pad_to_multiple_of,
  2877. verbose=verbose,
  2878. **kwargs,
  2879. )
  2880. return self._batch_encode_plus(
  2881. batch_text_or_text_pairs=batch_text_or_text_pairs,
  2882. add_special_tokens=add_special_tokens,
  2883. padding_strategy=padding_strategy,
  2884. truncation_strategy=truncation_strategy,
  2885. max_length=max_length,
  2886. stride=stride,
  2887. is_split_into_words=is_split_into_words,
  2888. pad_to_multiple_of=pad_to_multiple_of,
  2889. padding_side=padding_side,
  2890. return_tensors=return_tensors,
  2891. return_token_type_ids=return_token_type_ids,
  2892. return_attention_mask=return_attention_mask,
  2893. return_overflowing_tokens=return_overflowing_tokens,
  2894. return_special_tokens_mask=return_special_tokens_mask,
  2895. return_offsets_mapping=return_offsets_mapping,
  2896. return_length=return_length,
  2897. verbose=verbose,
  2898. split_special_tokens=split_special_tokens,
  2899. **kwargs,
  2900. )
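# A brief sketch contrasting the deprecated `batch_encode_plus` with the equivalent
# `__call__` usage recommended above; the checkpoint and sentences are placeholders.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
pairs = [("How old are you?", "I'm 6."), ("What is your name?", "Bert.")]

legacy = tok.batch_encode_plus(pairs, padding=True, truncation=True)
modern = tok([p[0] for p in pairs], [p[1] for p in pairs], padding=True, truncation=True)
assert legacy["input_ids"] == modern["input_ids"]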
  2901. def _batch_encode_plus(
  2902. self,
  2903. batch_text_or_text_pairs: Union[
  2904. List[TextInput],
  2905. List[TextInputPair],
  2906. List[PreTokenizedInput],
  2907. List[PreTokenizedInputPair],
  2908. List[EncodedInput],
  2909. List[EncodedInputPair],
  2910. ],
  2911. add_special_tokens: bool = True,
  2912. padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
  2913. truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
  2914. max_length: Optional[int] = None,
  2915. stride: int = 0,
  2916. is_split_into_words: bool = False,
  2917. pad_to_multiple_of: Optional[int] = None,
2918. padding_side: Optional[str] = None,
  2919. return_tensors: Optional[Union[str, TensorType]] = None,
  2920. return_token_type_ids: Optional[bool] = None,
  2921. return_attention_mask: Optional[bool] = None,
  2922. return_overflowing_tokens: bool = False,
  2923. return_special_tokens_mask: bool = False,
  2924. return_offsets_mapping: bool = False,
  2925. return_length: bool = False,
  2926. verbose: bool = True,
  2927. split_special_tokens: bool = False,
  2928. **kwargs,
  2929. ) -> BatchEncoding:
  2930. raise NotImplementedError
  2931. def pad(
  2932. self,
  2933. encoded_inputs: Union[
  2934. BatchEncoding,
  2935. List[BatchEncoding],
  2936. Dict[str, EncodedInput],
  2937. Dict[str, List[EncodedInput]],
  2938. List[Dict[str, EncodedInput]],
  2939. ],
  2940. padding: Union[bool, str, PaddingStrategy] = True,
  2941. max_length: Optional[int] = None,
  2942. pad_to_multiple_of: Optional[int] = None,
2943. padding_side: Optional[str] = None,
  2944. return_attention_mask: Optional[bool] = None,
  2945. return_tensors: Optional[Union[str, TensorType]] = None,
  2946. verbose: bool = True,
  2947. ) -> BatchEncoding:
  2948. """
  2949. Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
  2950. in the batch.
  2951. Padding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`,
  2952. `self.pad_token_id` and `self.pad_token_type_id`).
  2953. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the
  2954. text followed by a call to the `pad` method to get a padded encoding.
  2955. <Tip>
  2956. If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
  2957. result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
  2958. PyTorch tensors, you will lose the specific device of your tensors however.
  2959. </Tip>
  2960. Args:
  2961. encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
  2962. Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
  2963. tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
  2964. List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
  2965. collate function.
  2966. Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
  2967. the note above for the return type.
  2968. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
  2969. Select a strategy to pad the returned sequences (according to the model's padding side and padding
  2970. index) among:
  2971. - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
  2972. sequence if provided).
  2973. - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
  2974. acceptable input length for the model if that argument is not provided.
  2975. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
  2976. lengths).
  2977. max_length (`int`, *optional*):
  2978. Maximum length of the returned list and optionally padding length (see above).
  2979. pad_to_multiple_of (`int`, *optional*):
  2980. If set will pad the sequence to a multiple of the provided value.
  2981. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
  2982. `>= 7.5` (Volta).
  2983. padding_side (`str`, *optional*):
  2984. The side on which the model should have padding applied. Should be selected between ['right', 'left'].
  2985. Default value is picked from the class attribute of the same name.
  2986. return_attention_mask (`bool`, *optional*):
  2987. Whether to return the attention mask. If left to the default, will return the attention mask according
  2988. to the specific tokenizer's default, defined by the `return_outputs` attribute.
  2989. [What are attention masks?](../glossary#attention-mask)
  2990. return_tensors (`str` or [`~utils.TensorType`], *optional*):
  2991. If set, will return tensors instead of list of python integers. Acceptable values are:
  2992. - `'tf'`: Return TensorFlow `tf.constant` objects.
  2993. - `'pt'`: Return PyTorch `torch.Tensor` objects.
  2994. - `'np'`: Return Numpy `np.ndarray` objects.
  2995. verbose (`bool`, *optional*, defaults to `True`):
  2996. Whether or not to print more information and warnings.
  2997. """
        if self.__class__.__name__.endswith("Fast"):
            if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False):
                logger.warning_advice(
                    f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer,"
                    " using the `__call__` method is faster than using a method to encode the text followed by a call"
                    " to the `pad` method to get a padded encoding."
                )
                self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True

        # If we have a list of dicts, let's convert it into a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
            encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}

        # The model's main input name, usually `input_ids`, has been passed for padding
        if self.model_input_names[0] not in encoded_inputs:
            raise ValueError(
                "You should supply an encoding or a list of encodings to this method "
                f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
            )

        required_input = encoded_inputs[self.model_input_names[0]]

        if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0):
            if return_attention_mask:
                encoded_inputs["attention_mask"] = []
            return encoded_inputs

        # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            for item in required_input:
                if len(item) != 0:
                    first_element = item[0]
                    break
        # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
        if not isinstance(first_element, (int, list, tuple)):
            if is_tf_tensor(first_element):
                return_tensors = "tf" if return_tensors is None else return_tensors
            elif is_torch_tensor(first_element):
                return_tensors = "pt" if return_tensors is None else return_tensors
            elif isinstance(first_element, np.ndarray):
                return_tensors = "np" if return_tensors is None else return_tensors
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

            for key, value in encoded_inputs.items():
                encoded_inputs[key] = to_py_obj(value)

        # Convert padding_strategy to a PaddingStrategy
        padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
            padding=padding, max_length=max_length, verbose=verbose
        )

        required_input = encoded_inputs[self.model_input_names[0]]
        if required_input and not isinstance(required_input[0], (list, tuple)):
            encoded_inputs = self._pad(
                encoded_inputs,
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )
            return BatchEncoding(encoded_inputs, tensor_type=return_tensors)

        batch_size = len(required_input)
        assert all(
            len(v) == batch_size for v in encoded_inputs.values()
        ), "Some items in the output dictionary have a different batch size than others."

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = max(len(inputs) for inputs in required_input)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in encoded_inputs.items()}
            outputs = self._pad(
                inputs,
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        return BatchEncoding(batch_outputs, tensor_type=return_tensors)
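
    # Usage sketch (illustrative only): using `pad` as a DataLoader collate_fn. Assumes
    # `tokenizer` is a concrete tokenizer instance with a pad token set; the feature dicts
    # below are hypothetical token ids.
    #
    #     features = [{"input_ids": [101, 7592, 102]}, {"input_ids": [101, 7592, 2088, 999, 102]}]
    #     batch = tokenizer.pad(features, padding=True, return_tensors="pt")
    #     # batch["input_ids"] has shape (2, 5); the shorter row is padded with tokenizer.pad_token_id
    #     # and batch["attention_mask"] marks the padded positions with 0.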

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create the token type IDs corresponding to the sequences passed. [What are token type
        IDs?](../glossary#token-type-ids)

        Should be overridden in a subclass if the model has a special way of building those.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

        Returns:
            `List[int]`: The token type ids.
        """
        if token_ids_1 is None:
            return len(token_ids_0) * [0]
        return [0] * len(token_ids_0) + [1] * len(token_ids_1)
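
    # Illustrative example of the base behaviour (ids are hypothetical; models like BERT
    # override this to account for special tokens such as [CLS]/[SEP]):
    #
    #     tokenizer.create_token_type_ids_from_sequences([10, 11, 12], [20, 21])
    #     # -> [0, 0, 0, 1, 1]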

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens.

        This implementation does not add special tokens and this method should be overridden in a subclass.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

        Returns:
            `List[int]`: The model input with special tokens.
        """
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1
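
    # Sketch of how a subclass might override this method. This mirrors BERT-style models,
    # which wrap the sequences with [CLS]/[SEP]; it is not the base implementation above.
    #
    #     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    #         cls, sep = [self.cls_token_id], [self.sep_token_id]
    #         if token_ids_1 is None:
    #             return cls + token_ids_0 + sep
    #         return cls + token_ids_0 + sep + token_ids_1 + sep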

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        ids: List[int],
        pair_ids: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens. Please note that for *pair_ids*
        different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
        overflowing tokens. Such a combination of arguments will raise an error.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0

        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )

        if (
            return_overflowing_tokens
            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
            and pair_ids is not None
        ):
            raise ValueError(
                "Not possible to return overflowing tokens for a pair of sequences with the "
                "`longest_first` truncation strategy. Please select another truncation strategy than "
                "`longest_first`, for instance `only_second` or `only_first`."
            )

        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        encoded_inputs = {}

        # Compute the total size of the returned encodings
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

        # Truncation: Handle max sequence length
        overflowing_tokens = []
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
                ids,
                pair_ids=pair_ids,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )

        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["num_truncated_tokens"] = total_len - max_length

        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])

        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)

        # Check lengths
        self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)

        # Padding
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )

        if return_length:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])

        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )

        return batch_outputs
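
    # Usage sketch (illustrative only): `prepare_for_model` on pre-converted ids. The input
    # text is made up; a real call obtains ids via `tokenize` + `convert_tokens_to_ids`.
    #
    #     ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
    #     encoding = tokenizer.prepare_for_model(ids, add_special_tokens=True)
    #     # encoding["input_ids"] now includes the model's special tokens around the original ids.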

    def truncate_sequences(
        self,
        ids: List[int],
        pair_ids: Optional[List[int]] = None,
        num_tokens_to_remove: int = 0,
        truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
        stride: int = 0,
    ) -> Tuple[List[int], List[int], List[int]]:
        """
        Truncates a sequence pair following the strategy.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            num_tokens_to_remove (`int`, *optional*, defaults to 0):
                Number of tokens to remove using the truncation strategy.
            truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
                The strategy to follow for truncation. Can be:

                - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will truncate
                  token by token, removing a token from the longest sequence in the pair if a pair of sequences (or
                  a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'do_not_truncate'`: No truncation (i.e., can output a batch with sequence lengths greater than
                  the model maximum admissible input size).
            stride (`int`, *optional*, defaults to 0):
                If set to a positive number, the overflowing tokens returned will contain some tokens from the main
                sequence returned. The value of this argument defines the number of additional tokens.

        Returns:
            `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
            overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a
            pair of sequences (or a batch of pairs) is provided.
        """
        if num_tokens_to_remove <= 0:
            return ids, pair_ids, []

        if not isinstance(truncation_strategy, TruncationStrategy):
            truncation_strategy = TruncationStrategy(truncation_strategy)

        overflowing_tokens = []
        if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
            truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
        ):
            if len(ids) > num_tokens_to_remove:
                window_len = min(len(ids), stride + num_tokens_to_remove)
                if self.truncation_side == "left":
                    overflowing_tokens = ids[:window_len]
                    ids = ids[num_tokens_to_remove:]
                elif self.truncation_side == "right":
                    overflowing_tokens = ids[-window_len:]
                    ids = ids[:-num_tokens_to_remove]
                else:
                    raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")
            else:
                error_msg = (
                    f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
                    f"but the first sequence has a length {len(ids)}. "
                )
                if truncation_strategy == TruncationStrategy.ONLY_FIRST:
                    error_msg = (
                        error_msg + "Please select another truncation strategy than "
                        f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
                    )
                logger.error(error_msg)
        elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
            logger.warning(
                "Be aware, overflowing tokens are not returned for the setting you have chosen,"
                f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
                "truncation strategy. So the returned list will always be empty even if some "
                "tokens have been removed."
            )
            len_pair_ids = len(pair_ids) if pair_ids is not None else 0
            len_ids = len(ids)
            first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove)
            second_remove = num_tokens_to_remove - first_remove
            if len_ids > len_pair_ids:
                ids_to_move = first_remove + second_remove // 2
                pair_ids_to_move = second_remove - second_remove // 2
            else:
                ids_to_move = second_remove // 2
                pair_ids_to_move = first_remove + second_remove - (second_remove // 2)

            if self.truncation_side == "right":
                ids = ids[:-ids_to_move] if ids_to_move > 0 else ids
                pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids
            elif self.truncation_side == "left":
                ids = ids[ids_to_move:]
                pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None
            else:
                raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")

        elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
            if len(pair_ids) > num_tokens_to_remove:
                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
                if self.truncation_side == "right":
                    overflowing_tokens = pair_ids[-window_len:]
                    pair_ids = pair_ids[:-num_tokens_to_remove]
                elif self.truncation_side == "left":
                    overflowing_tokens = pair_ids[:window_len]
                    pair_ids = pair_ids[num_tokens_to_remove:]
                else:
                    raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")
            else:
                logger.error(
                    f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
                    f"but the second sequence has a length {len(pair_ids)}. "
                    f"Please select another truncation strategy than {truncation_strategy}, "
                    "for instance 'longest_first' or 'only_first'."
                )

        return (ids, pair_ids, overflowing_tokens)
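
    # Illustrative example (ids are hypothetical) of `truncate_sequences` with the default
    # right truncation side and the `only_first` strategy:
    #
    #     ids, pair_ids, overflow = tokenizer.truncate_sequences(
    #         [1, 2, 3, 4, 5], pair_ids=[6, 7], num_tokens_to_remove=2, truncation_strategy="only_first", stride=1
    #     )
    #     # ids == [1, 2, 3], pair_ids == [6, 7], overflow == [3, 4, 5] (the stride keeps one extra token)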

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch).

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length
                - PaddingStrategy.DO_NOT_PAD (default): Do not pad

                The tokenizer padding side is defined by the `padding_side` argument:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer, if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side

            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError(f"Invalid padding side: {padding_side}. Should be 'left' or 'right'.")

        return encoded_inputs
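
    # Illustrative example (hypothetical ids; assumes pad_token_id is 0 and "attention_mask"
    # is among model_input_names): padding a single encoding to max_length=5 on each side.
    #
    #     tokenizer._pad({"input_ids": [7, 8, 9]}, max_length=5,
    #                    padding_strategy=PaddingStrategy.MAX_LENGTH, padding_side="right")
    #     # -> {"input_ids": [7, 8, 9, 0, 0], "attention_mask": [1, 1, 1, 0, 0]}
    #     tokenizer._pad({"input_ids": [7, 8, 9]}, max_length=5,
    #                    padding_strategy=PaddingStrategy.MAX_LENGTH, padding_side="left")
    #     # -> {"input_ids": [0, 0, 7, 8, 9], "attention_mask": [0, 0, 1, 1, 1]}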

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)`, but we
        often want to remove sub-word tokenization artifacts at the same time.

        Args:
            tokens (`List[str]`): The tokens to join into a string.

        Returns:
            `str`: The joined tokens.
        """
        raise NotImplementedError

    def batch_decode(
        self,
        sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> List[str]:
        """
        Convert a list of lists of token ids into a list of strings by calling decode.

        Args:
            sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `List[str]`: The list of decoded sentences.
        """
        return [
            self.decode(
                seq,
                skip_special_tokens=skip_special_tokens,
                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                **kwargs,
            )
            for seq in sequences
        ]
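
    # Usage sketch (illustrative only): round-tripping a batch through `__call__` and
    # `batch_decode`; the input sentences are made up.
    #
    #     encodings = tokenizer(["Hello world", "How are you?"], padding=True)
    #     texts = tokenizer.batch_decode(encodings["input_ids"], skip_special_tokens=True)
    #     # texts is a list of two strings approximately matching the original inputs.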

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        """
        # Convert inputs to python lists
        token_ids = to_py_obj(token_ids)

        return self._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> str:
        raise NotImplementedError

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                List of ids of the second sequence.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        assert already_has_special_tokens and token_ids_1 is None, (
            "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
            "Please use a slow (full python) tokenizer to activate this argument. "
            "Or set `return_special_tokens_mask=True` when calling the encoding method "
            "to get the special tokens mask in any tokenizer. "
        )

        all_special_ids = self.all_special_ids  # cache the property

        special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]

        return special_tokens_mask
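
    # Illustrative example (hypothetical ids): with `already_has_special_tokens=True`, ids that
    # belong to `all_special_ids` get a 1, everything else a 0. Assuming 101 and 102 are
    # special-token ids for the tokenizer at hand:
    #
    #     tokenizer.get_special_tokens_mask([101, 7592, 2088, 102], already_has_special_tokens=True)
    #     # -> [1, 0, 0, 1]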

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """
        Clean up simple English tokenization artifacts like spaces before punctuation and abbreviated forms.

        Args:
            out_string (`str`): The text to clean up.

        Returns:
            `str`: The cleaned-up string.
        """
        out_string = (
            out_string.replace(" .", ".")
            .replace(" ?", "?")
            .replace(" !", "!")
            .replace(" ,", ",")
            .replace(" ' ", "'")
            .replace(" n't", "n't")
            .replace(" 'm", "'m")
            .replace(" 's", "'s")
            .replace(" 've", "'ve")
            .replace(" 're", "'re")
        )
        return out_string
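
    # Illustrative example of `clean_up_tokenization` on decoded text:
    #
    #     PreTrainedTokenizerBase.clean_up_tokenization("Hello , I do n't know !")
    #     # -> "Hello, I don't know!"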

    def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
        """
        Depending on the input and internal state we might trigger a warning about a sequence that is too long for
        its corresponding model.

        Args:
            ids (`List[int]`): The ids produced by the tokenization
            max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
            verbose (`bool`): Whether or not to print more information and warnings.
        """
        if max_length is None and len(ids) > self.model_max_length and verbose:
            if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
                logger.warning(
                    "Token indices sequence length is longer than the specified maximum sequence length "
                    f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
                    "will result in indexing errors"
                )
                self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True

    def _switch_to_input_mode(self):
        """
        Private method to put the tokenizer in input mode (when it has different modes for inputs/outputs)
        """
        pass

    def _switch_to_target_mode(self):
        """
        Private method to put the tokenizer in target mode (when it has different modes for inputs/outputs)
        """
        pass

    @contextmanager
    def as_target_tokenizer(self):
        """
        Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
        sequence-to-sequence models that need a slightly different processing for the labels.
        """
        warnings.warn(
            "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your "
            "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as "
            "your input texts if you use the same keyword arguments, or in a separate call)."
        )
        self._switch_to_target_mode()
        self._in_target_context_manager = True
        yield
        self._in_target_context_manager = False
        self._switch_to_input_mode()
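
    # Usage sketch (illustrative only): the recommended replacement for `as_target_tokenizer`,
    # tokenizing targets via the `text_target` argument of `__call__`; the sentences are made up.
    #
    #     model_inputs = tokenizer("Translate this.", text_target="Traduis ceci.", return_tensors="pt")
    #     # model_inputs contains "input_ids"/"attention_mask" for the source and "labels" for the target.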

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoTokenizer"):
        """
        Register this class with a given auto class. This should only be used for custom tokenizers as the ones in
        the library are already mapped with `AutoTokenizer`.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
                The auto class to register this new tokenizer with.
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class
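
    # Usage sketch (illustrative only; `MyCustomTokenizer` is a hypothetical subclass defined in
    # your own code, not in the library):
    #
    #     class MyCustomTokenizer(PreTrainedTokenizer):
    #         ...
    #
    #     MyCustomTokenizer.register_for_auto_class("AutoTokenizer")
    #     # After saving/pushing the tokenizer files to the Hub, AutoTokenizer can load the custom
    #     # class with `trust_remote_code=True`.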

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare model inputs for translation. For best performance, translate one sentence at a time.

        Arguments:
            src_texts (`List[str]`):
                List of documents to summarize or source language texts.
            tgt_texts (`list`, *optional*):
                List of summaries or target language texts.
            max_length (`int`, *optional*):
                Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
                left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
            max_target_length (`int`, *optional*):
                Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or
                set to `None`, this will use the max_length value.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
                  lengths).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of a list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length`
                  or to the maximum acceptable input length for the model if that argument is not provided. This
                  will truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'`: No truncation (i.e., can output a batch with sequence lengths
                  greater than the model maximum admissible input size).
            **kwargs:
                Additional keyword arguments passed along to `self.__call__`.

        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to the encoder.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
            - **labels** -- List of token ids for tgt_texts.

            The full set of keys `[input_ids, attention_mask, labels]` will only be returned if tgt_texts is passed.
            Otherwise, input_ids, attention_mask will be the only keys.
        """
        # docstyle-ignore
        formatted_warning = """
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
`__call__` method to prepare your inputs and targets.

Here is a short example:

model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)

If you need to use different keyword arguments for the source and target texts, you should do two calls like this:

model_inputs = tokenizer(src_texts, ...)
labels = tokenizer(text_target=tgt_texts, ...)
model_inputs["labels"] = labels["input_ids"]

See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
        warnings.warn(formatted_warning, FutureWarning)
        # mBART-specific kwargs that should be ignored by other models.
        kwargs.pop("src_lang", None)
        kwargs.pop("tgt_lang", None)
        if max_length is None:
            max_length = self.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = max_length
        with self.as_target_tokenizer():
            labels = self(
                tgt_texts,
                add_special_tokens=True,
                return_tensors=return_tensors,
                padding=padding,
                max_length=max_target_length,
                truncation=truncation,
                **kwargs,
            )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs


def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
    """
    Get the tokenization file to use for this version of transformers.

    Args:
        tokenization_files (`List[str]`): The list of available tokenizer files.

    Returns:
        `str`: The tokenization file to use.
    """
    tokenizer_files_map = {}
    for file_name in tokenization_files:
        search = _re_tokenizer_file.search(file_name)
        if search is not None:
            v = search.groups()[0]
            tokenizer_files_map[v] = file_name
    available_versions = sorted(tokenizer_files_map.keys())

    # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions.
    tokenizer_file = FULL_TOKENIZER_FILE
    transformers_version = version.parse(__version__)
    for v in available_versions:
        if version.parse(v) <= transformers_version:
            tokenizer_file = tokenizer_files_map[v]
        else:
            # No point going further since the versions are sorted.
            break

    return tokenizer_file
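
# Illustrative example (hypothetical file names): with transformers 4.30.0 installed,
# `get_fast_tokenizer_file(["tokenizer.json", "tokenizer.4.22.0.json", "tokenizer.9.0.0.json"])`
# would return "tokenizer.4.22.0.json", the versioned file closest to (but not above) the installed
# version, falling back to FULL_TOKENIZER_FILE ("tokenizer.json") when no versioned file qualifies.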


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None:
    PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
        object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
    )