- # coding=utf-8
- # Copyright 2020 The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user
- fronting encoding methods) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionary
- of output with special method for the Fast tokenizers)
- """
- import copy
- import json
- import os
- import re
- import warnings
- from collections import UserDict
- from collections.abc import Mapping, Sized
- from contextlib import contextmanager
- from dataclasses import dataclass
- from inspect import isfunction
- from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
- import numpy as np
- from packaging import version
- from . import __version__
- from .dynamic_module_utils import custom_object_save
- from .utils import (
- ExplicitEnum,
- PaddingStrategy,
- PushToHubMixin,
- TensorType,
- add_end_docstrings,
- add_model_info_to_auto_map,
- add_model_info_to_custom_pipelines,
- cached_file,
- copy_func,
- download_url,
- extract_commit_hash,
- get_json_schema,
- is_flax_available,
- is_jax_tensor,
- is_mlx_available,
- is_numpy_array,
- is_offline_mode,
- is_protobuf_available,
- is_remote_url,
- is_tf_available,
- is_tf_tensor,
- is_tokenizers_available,
- is_torch_available,
- is_torch_device,
- is_torch_tensor,
- logging,
- requires_backends,
- to_py_obj,
- )
- from .utils.chat_template_utils import _compile_jinja_template, _render_with_assistant_indices
- from .utils.import_utils import PROTOBUF_IMPORT_ERROR
- if TYPE_CHECKING:
- if is_torch_available():
- import torch
- if is_tf_available():
- import tensorflow as tf
- if is_flax_available():
- import jax.numpy as jnp # noqa: F401
- def import_protobuf_decode_error(error_message=""):
- if is_protobuf_available():
- from google.protobuf.message import DecodeError
- return DecodeError
- else:
- raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
- if is_tokenizers_available():
- from tokenizers import AddedToken
- from tokenizers import Encoding as EncodingFast
- else:
- @dataclass(frozen=False, eq=True)
- class AddedToken:
- """
-     AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
-     way it should behave.
-     The `normalized` argument will default to `not special` if it is not specified, similarly to the definition in
-     `tokenizers`.
- """
- def __init__(
- self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None
- ):
- self.content = content
- self.single_word = single_word
- self.lstrip = lstrip
- self.rstrip = rstrip
- self.special = special
- self.normalized = normalized if normalized is not None else not special
- def __getstate__(self):
- return self.__dict__
- def __str__(self):
- return self.content
- @dataclass
- class EncodingFast:
-         """This is a dummy class because, without the `tokenizers` library, we don't have these objects anyway"""
- pass
- logger = logging.get_logger(__name__)
- VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
- LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
- # Define type aliases and NamedTuples
- TextInput = str
- PreTokenizedInput = List[str]
- EncodedInput = List[int]
- TextInputPair = Tuple[str, str]
- PreTokenizedInputPair = Tuple[List[str], List[str]]
- EncodedInputPair = Tuple[List[int], List[int]]
- # Define type aliases for text-related non-text modalities
- AudioInput = Union["np.ndarray", "torch.Tensor", List["np.ndarray"], List["torch.Tensor"]]
- # Slow tokenizers used to be saved in three separated files
- SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
- ADDED_TOKENS_FILE = "added_tokens.json"
- TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
- # Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file
- FULL_TOKENIZER_FILE = "tokenizer.json"
- _re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json")
- class TruncationStrategy(ExplicitEnum):
- """
- Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
- an IDE.
- """
- ONLY_FIRST = "only_first"
- ONLY_SECOND = "only_second"
- LONGEST_FIRST = "longest_first"
- DO_NOT_TRUNCATE = "do_not_truncate"
- class CharSpan(NamedTuple):
- """
- Character span in the original string.
- Args:
- start (`int`): Index of the first character in the original string.
- end (`int`): Index of the character following the last character in the original string.
- """
- start: int
- end: int
- class TokenSpan(NamedTuple):
- """
- Token span in an encoded string (list of tokens).
- Args:
- start (`int`): Index of the first token in the span.
- end (`int`): Index of the token following the last token in the span.
- """
- start: int
- end: int
- class BatchEncoding(UserDict):
- """
- Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`],
- [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
- [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc).
- This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
- utility methods to map from word/character space to token space.
- Args:
- data (`dict`, *optional*):
- Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods
- ('input_ids', 'attention_mask', etc.).
- encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*):
-             If the tokenizer is a fast tokenizer which outputs additional information like the mapping from
-             word/character space to token space, the `tokenizers.Encoding` instance or list of instances (for
-             batches) holds this information.
- tensor_type (`Union[None, str, TensorType]`, *optional*):
- You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
- initialization.
- prepend_batch_axis (`bool`, *optional*, defaults to `False`):
- Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). Note that this
-             parameter only has an effect if the parameter `tensor_type` is set, *otherwise it has no effect*.
-         n_sequences (`Optional[int]`, *optional*):
-             The number of sequences used to generate each sample from the batch encoded in this [`BatchEncoding`]
-             (`1` for a single sequence, `2` for a pair of sequences).
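-     Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint used elsewhere in this
-     file can be loaded):
-     ```python
-     tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-     encoding = tokenizer("Hello world!")  # `__call__` returns a BatchEncoding
-     encoding["input_ids"]  # dict-style access to the encoded ids
-     encoding.tokens()  # fast-tokenizer helper, e.g. ['[CLS]', 'hello', 'world', '!', '[SEP]']
-     ```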
- """
- def __init__(
- self,
- data: Optional[Dict[str, Any]] = None,
- encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
- tensor_type: Union[None, str, TensorType] = None,
- prepend_batch_axis: bool = False,
- n_sequences: Optional[int] = None,
- ):
- super().__init__(data)
- if isinstance(encoding, EncodingFast):
- encoding = [encoding]
- self._encodings = encoding
- if n_sequences is None and encoding is not None and len(encoding):
- n_sequences = encoding[0].n_sequences
- self._n_sequences = n_sequences
- self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
- @property
- def n_sequences(self) -> Optional[int]:
- """
- `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
- [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
- sentences)
- """
- return self._n_sequences
- @property
- def is_fast(self) -> bool:
- """
- `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
- or not.
- """
- return self._encodings is not None
- def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
- """
- If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
- etc.).
- If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`.
- If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.)
- with the constraint of slice.
- """
- if isinstance(item, str):
- return self.data[item]
- elif self._encodings is not None:
- return self._encodings[item]
- elif isinstance(item, slice):
- return {key: self.data[key][item] for key in self.data.keys()}
- else:
- raise KeyError(
- "Invalid key. Only three types of key are available: "
- "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting."
- )
- def __getattr__(self, item: str):
- try:
- return self.data[item]
- except KeyError:
- raise AttributeError
- def __getstate__(self):
- return {"data": self.data, "encodings": self._encodings}
- def __setstate__(self, state):
- if "data" in state:
- self.data = state["data"]
- if "encodings" in state:
- self._encodings = state["encodings"]
- def keys(self):
- return self.data.keys()
- def values(self):
- return self.data.values()
- def items(self):
- return self.data.items()
- # After this point:
- # Extended properties and methods only available for fast (Rust-based) tokenizers
- # provided by HuggingFace tokenizers library.
- @property
- def encodings(self) -> Optional[List[EncodingFast]]:
- """
-         `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns
-         `None` if the input was tokenized by a Python (i.e., not fast) tokenizer.
- """
- return self._encodings
- def tokens(self, batch_index: int = 0) -> List[str]:
- """
- Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
- integer indices) at a given batch index (only works for the output of a fast tokenizer).
- Args:
- batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
- Returns:
- `List[str]`: The list of tokens at that index.
- """
- if not self._encodings:
- raise ValueError(
-                 "tokens() is not available when using non-fast tokenizers (i.e. not an instance of a"
-                 " `XxxTokenizerFast` class)."
- )
- return self._encodings[batch_index].tokens
- def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
- """
- Return a list mapping the tokens to the id of their original sentences:
- - `None` for special tokens added around or between sequences,
- - `0` for tokens corresponding to words in the first sequence,
- - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
- encoded.
- Args:
- batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
- Returns:
- `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
- by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
- sequence.
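-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world", "How are you?")
-         encoding.sequence_ids()  # e.g. [None, 0, 0, None, 1, 1, 1, 1, None]
-         ```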
- """
- if not self._encodings:
- raise ValueError(
-                 "sequence_ids() is not available when using non-fast tokenizers (i.e. not an instance of a"
-                 " `XxxTokenizerFast` class)."
- )
- return self._encodings[batch_index].sequence_ids
- def words(self, batch_index: int = 0) -> List[Optional[int]]:
- """
- Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
- Args:
- batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
- Returns:
- `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
- tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
- (several tokens will be mapped to the same word index if they are parts of that word).
- """
- if not self._encodings:
- raise ValueError(
-                 "words() is not available when using non-fast tokenizers (i.e. not an instance of a"
-                 " `XxxTokenizerFast` class)."
- )
- warnings.warn(
- "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
- "but more self-explanatory `BatchEncoding.word_ids()` property.",
- FutureWarning,
- )
- return self.word_ids(batch_index)
- def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
- """
- Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
- Args:
- batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.
- Returns:
- `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
- tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
- (several tokens will be mapped to the same word index if they are parts of that word).
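-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("huggingface rocks")
-         encoding.word_ids()  # e.g. [None, 0, 0, 1, None]; both sub-tokens of "huggingface" share word index 0
-         ```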
- """
- if not self._encodings:
- raise ValueError(
-                 "word_ids() is not available when using non-fast tokenizers (i.e. not an instance of a"
-                 " `XxxTokenizerFast` class)."
- )
- return self._encodings[batch_index].word_ids
- def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
- """
- Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
- for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair
- Can be called as:
- - `self.token_to_sequence(token_index)` if batch size is 1
- - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1
- This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
-         words are defined by the user). In this case it allows you to easily associate encoded tokens with the
-         provided tokenized words.
- Args:
- batch_or_token_index (`int`):
- Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
- the token in the sequence.
- token_index (`int`, *optional*):
- If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
- sequence.
- Returns:
-             `int`: Index of the sequence that contains the token (`0` or `1`).
- """
- if not self._encodings:
- raise ValueError("token_to_sequence() is not available when using Python based tokenizers")
- if token_index is not None:
- batch_index = batch_or_token_index
- else:
- batch_index = 0
- token_index = batch_or_token_index
- if batch_index < 0:
- batch_index = self._batch_size + batch_index
- if token_index < 0:
- token_index = self._seq_len + token_index
- return self._encodings[batch_index].token_to_sequence(token_index)
- def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
- """
- Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.
- Can be called as:
- - `self.token_to_word(token_index)` if batch size is 1
- - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1
- This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
-         words are defined by the user). In this case it allows you to easily associate encoded tokens with the
-         provided tokenized words.
- Args:
- batch_or_token_index (`int`):
-                 Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
- the token in the sequence.
- token_index (`int`, *optional*):
- If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
- sequence.
- Returns:
- `int`: Index of the word in the input sequence.
- """
- if not self._encodings:
- raise ValueError("token_to_word() is not available when using Python based tokenizers")
- if token_index is not None:
- batch_index = batch_or_token_index
- else:
- batch_index = 0
- token_index = batch_or_token_index
- if batch_index < 0:
- batch_index = self._batch_size + batch_index
- if token_index < 0:
- token_index = self._seq_len + token_index
- return self._encodings[batch_index].token_to_word(token_index)
- def word_to_tokens(
- self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
- ) -> Optional[TokenSpan]:
- """
- Get the encoded token span corresponding to a word in a sequence of the batch.
- Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with:
- - **start** -- Index of the first token.
- - **end** -- Index of the token following the last token.
- Can be called as:
- - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
- - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to
- 1
- This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
-         are defined by the user). In this case it allows you to easily associate encoded tokens with the provided
-         tokenized words.
- Args:
- batch_or_word_index (`int`):
- Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
- the word in the sequence.
- word_index (`int`, *optional*):
-                 If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
- sequence.
- sequence_index (`int`, *optional*, defaults to 0):
-                 If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
- or 1) the provided word index belongs to.
- Returns:
- ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns
- `None` if no tokens correspond to the word. This can happen especially when the token is a special token
- that has been used to format the tokenization. For example when we add a class token at the very beginning
- of the tokenization.
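-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer(["huggingface", "rocks"], is_split_into_words=True)
-         encoding.word_to_tokens(0)  # e.g. TokenSpan(start=1, end=3); word 0 spans two sub-tokens
-         ```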
- """
- if not self._encodings:
- raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
- if word_index is not None:
- batch_index = batch_or_word_index
- else:
- batch_index = 0
- word_index = batch_or_word_index
- if batch_index < 0:
- batch_index = self._batch_size + batch_index
- if word_index < 0:
- word_index = self._seq_len + word_index
- span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
- return TokenSpan(*span) if span is not None else None
- def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
- """
- Get the character span corresponding to an encoded token in a sequence of the batch.
- Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with:
- - **start** -- Index of the first character in the original string associated to the token.
- - **end** -- Index of the character following the last character in the original string associated to the
- token.
- Can be called as:
- - `self.token_to_chars(token_index)` if batch size is 1
- - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1
- Args:
- batch_or_token_index (`int`):
-                 Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
- the token in the sequence.
- token_index (`int`, *optional*):
-                 If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
-                 sequence.
- Returns:
-             [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or `None` if the token
-             (e.g. <s>, </s>) doesn't correspond to any chars in the original string.
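-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world")
-         encoding.token_to_chars(1)  # e.g. CharSpan(start=0, end=5), the characters of "Hello"
-         ```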
- """
- if not self._encodings:
- raise ValueError("token_to_chars() is not available when using Python based tokenizers")
- if token_index is not None:
- batch_index = batch_or_token_index
- else:
- batch_index = 0
- token_index = batch_or_token_index
- span_indices = self._encodings[batch_index].token_to_chars(token_index)
- return CharSpan(*span_indices) if span_indices is not None else None
- def char_to_token(
- self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
- ) -> int:
- """
- Get the index of the token in the encoded output comprising a character in the original string for a sequence
- of the batch.
- Can be called as:
- - `self.char_to_token(char_index)` if batch size is 1
- - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1
- This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
-         are defined by the user). In this case it allows you to easily associate encoded tokens with the provided
-         tokenized words.
- Args:
- batch_or_char_index (`int`):
-                 Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
-                 the character in the sequence.
- char_index (`int`, *optional*):
-                 If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
-                 sequence.
- sequence_index (`int`, *optional*, defaults to 0):
-                 If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
- or 1) the provided character index belongs to.
- Returns:
- `int`: Index of the token, or None if the char index refers to a whitespace only token and whitespace is
- trimmed with `trim_offsets=True`.
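-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world")
-         encoding.char_to_token(6)  # e.g. 2; character "w" falls inside the token for "world"
-         ```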
- """
- if not self._encodings:
- raise ValueError("char_to_token() is not available when using Python based tokenizers")
- if char_index is not None:
- batch_index = batch_or_char_index
- else:
- batch_index = 0
- char_index = batch_or_char_index
- return self._encodings[batch_index].char_to_token(char_index, sequence_index)
- def word_to_chars(
- self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
- ) -> CharSpan:
- """
-         Get the character span in the original string corresponding to a given word in a sequence of the batch.
- Character spans are returned as a CharSpan NamedTuple with:
- - start: index of the first character in the original string
- - end: index of the character following the last character in the original string
- Can be called as:
- - `self.word_to_chars(word_index)` if batch size is 1
- - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1
- Args:
- batch_or_word_index (`int`):
-                 Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
-                 the word in the sequence.
- word_index (`int`, *optional*):
-                 If a batch index is provided in *batch_or_word_index*, this can be the index of the word in the
- sequence.
- sequence_index (`int`, *optional*, defaults to 0):
-                 If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
- or 1) the provided word index belongs to.
- Returns:
- `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan
- are NamedTuple with:
- - start: index of the first character associated to the token in the original string
- - end: index of the character following the last character associated to the token in the original
- string
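-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world")
-         encoding.word_to_chars(1)  # e.g. CharSpan(start=6, end=11), the characters of "world"
-         ```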
- """
- if not self._encodings:
- raise ValueError("word_to_chars() is not available when using Python based tokenizers")
- if word_index is not None:
- batch_index = batch_or_word_index
- else:
- batch_index = 0
- word_index = batch_or_word_index
- return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))
- def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int:
- """
- Get the word in the original string corresponding to a character in the original string of a sequence of the
- batch.
- Can be called as:
- - `self.char_to_word(char_index)` if batch size is 1
- - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1
- This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
-         are defined by the user). In this case it allows you to easily associate encoded tokens with the provided
-         tokenized words.
- Args:
- batch_or_char_index (`int`):
-                 Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
- the character in the original string.
- char_index (`int`, *optional*):
-                 If a batch index is provided in *batch_or_char_index*, this can be the index of the character in the
- original string.
- sequence_index (`int`, *optional*, defaults to 0):
-                 If a pair of sequences is encoded in the batch, this can be used to specify which sequence in the pair (0
- or 1) the provided character index belongs to.
- Returns:
-             `int` or `List[int]`: Index or indices of the corresponding word(s) in the original string.
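-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world")
-         encoding.char_to_word(8)  # e.g. 1; character "r" belongs to the second word, "world"
-         ```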
- """
- if not self._encodings:
- raise ValueError("char_to_word() is not available when using Python based tokenizers")
- if char_index is not None:
- batch_index = batch_or_char_index
- else:
- batch_index = 0
- char_index = batch_or_char_index
- return self._encodings[batch_index].char_to_word(char_index, sequence_index)
- def convert_to_tensors(
- self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
- ):
- """
- Convert the inner content to tensors.
- Args:
- tensor_type (`str` or [`~utils.TensorType`], *optional*):
- The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
- `None`, no modification is done.
-             prepend_batch_axis (`bool`, *optional*, defaults to `False`):
- Whether or not to add the batch dimension during the conversion.
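-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint and PyTorch are
-         available):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world")  # values are plain Python lists at this point
-         encoding.convert_to_tensors("pt", prepend_batch_axis=True)
-         encoding["input_ids"].shape  # e.g. torch.Size([1, 4])
-         ```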
- """
- if tensor_type is None:
- return self
- # Convert to TensorType
- if not isinstance(tensor_type, TensorType):
- tensor_type = TensorType(tensor_type)
- # Get a function reference for the correct framework
- if tensor_type == TensorType.TENSORFLOW:
- if not is_tf_available():
- raise ImportError(
- "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
- )
- import tensorflow as tf
- as_tensor = tf.constant
- is_tensor = tf.is_tensor
- elif tensor_type == TensorType.PYTORCH:
- if not is_torch_available():
- raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
- import torch
- is_tensor = torch.is_tensor
- def as_tensor(value, dtype=None):
- if isinstance(value, list) and isinstance(value[0], np.ndarray):
- return torch.from_numpy(np.array(value))
- return torch.tensor(value)
- elif tensor_type == TensorType.JAX:
- if not is_flax_available():
- raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
- import jax.numpy as jnp # noqa: F811
- as_tensor = jnp.array
- is_tensor = is_jax_tensor
- elif tensor_type == TensorType.MLX:
- if not is_mlx_available():
- raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.")
- import mlx.core as mx
- as_tensor = mx.array
- def is_tensor(obj):
- return isinstance(obj, mx.array)
- else:
- def as_tensor(value, dtype=None):
- if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
- value_lens = [len(val) for val in value]
- if len(set(value_lens)) > 1 and dtype is None:
- # we have a ragged list so handle explicitly
- value = as_tensor([np.asarray(val) for val in value], dtype=object)
- return np.asarray(value, dtype=dtype)
- is_tensor = is_numpy_array
- # Do the tensor conversion in batch
- for key, value in self.items():
- try:
- if prepend_batch_axis:
- value = [value]
- if not is_tensor(value):
- tensor = as_tensor(value)
- # Removing this for now in favor of controlling the shape with `prepend_batch_axis`
- # # at-least2d
- # if tensor.ndim > 2:
- # tensor = tensor.squeeze(0)
- # elif tensor.ndim < 2:
- # tensor = tensor[None, :]
- self[key] = tensor
- except Exception as e:
- if key == "overflowing_tokens":
- raise ValueError(
- "Unable to create tensor returning overflowing tokens of different lengths. "
- "Please see if a fast version of this tokenizer is available to have this feature available."
- ) from e
- raise ValueError(
- "Unable to create tensor, you should probably activate truncation and/or padding with"
- " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your"
- f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is"
- " expected)."
- ) from e
- return self
- def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding":
- """
- Send all values to device by calling `v.to(device)` (PyTorch only).
- Args:
- device (`str` or `torch.device`): The device to put the tensors on.
- Returns:
- [`BatchEncoding`]: The same instance after modification.
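-         Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint, PyTorch and a CUDA
-         device are available):
-         ```python
-         tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-         encoding = tokenizer("Hello world", return_tensors="pt")
-         encoding = encoding.to("cuda:0")  # every tensor in the batch is moved to the GPU
-         ```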
- """
- requires_backends(self, ["torch"])
- import torch
- # This check catches things like APEX blindly calling "to" on all inputs to a module
- # Otherwise it passes the casts down and casts the LongTensor containing the token idxs
- # into a HalfTensor
- if isinstance(device, str) or is_torch_device(device) or isinstance(device, int):
-             self.data = {k: v.to(device=device) if isinstance(v, torch.Tensor) else v for k, v in self.data.items()}
- else:
- logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.")
- return self
- class SpecialTokensMixin:
- """
-     A mixin, inherited by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`], that handles specific behaviors
-     related to special tokens. In particular, this class holds the attributes which can be used to directly access
-     these special tokens in a model-independent manner and allows setting and updating the special tokens.
- Args:
- bos_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing the beginning of a sentence.
- eos_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing the end of a sentence.
- unk_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing an out-of-vocabulary token.
- sep_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token separating two different sentences in the same input (used by BERT for instance).
- pad_token (`str` or `tokenizers.AddedToken`, *optional*):
-             A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
- attention mechanisms or loss computation.
- cls_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing the class of the input (used by BERT for instance).
- mask_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing a masked token (used by masked-language modeling pretraining objectives, like
- BERT).
- additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
- A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be
- skipped when decoding if `skip_special_tokens` is set to `True`.
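-     Example (an illustrative sketch; assumes the `google-bert/bert-base-uncased` checkpoint can be loaded):
-     ```python
-     tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
-     tokenizer.cls_token, tokenizer.sep_token  # e.g. ('[CLS]', '[SEP]')
-     tokenizer.pad_token  # e.g. '[PAD]'
-     ```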
- """
- SPECIAL_TOKENS_ATTRIBUTES = [
- "bos_token",
- "eos_token",
- "unk_token",
- "sep_token",
- "pad_token",
- "cls_token",
- "mask_token",
- "additional_special_tokens",
- ]
- def __init__(self, verbose=False, **kwargs):
- self._bos_token = None
- self._eos_token = None
- self._unk_token = None
- self._sep_token = None
- self._pad_token = None
- self._cls_token = None
- self._mask_token = None
- self._pad_token_type_id = 0
- self._additional_special_tokens = []
- self.verbose = verbose
- # We directly set the hidden value to allow initialization with special tokens
- # which are not yet in the vocabulary. Necessary for serialization/de-serialization
- # TODO clean this up at some point (probably by switching to fast tokenizers)
- for key, value in kwargs.items():
- if value is None:
- continue
- if key in self.SPECIAL_TOKENS_ATTRIBUTES:
- if key == "additional_special_tokens":
- assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
- assert all(
- isinstance(t, (str, AddedToken)) for t in value
- ), "One of the tokens is not a string or an AddedToken"
- setattr(self, key, value)
- elif isinstance(value, (str, AddedToken)):
- setattr(self, key, value)
- else:
- raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")
- def sanitize_special_tokens(self) -> int:
- """
-         The `sanitize_special_tokens` method is now deprecated, kept for backward compatibility, and will be removed
-         in transformers v5.
-         """
-         logger.warning_once("The `sanitize_special_tokens` method will be removed in transformers v5.")
- return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
- def add_special_tokens(
- self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True
- ) -> int:
- """
- Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
- special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
- current vocabulary).
- When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the
- model so that its embedding matrix matches the tokenizer.
- In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
- Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- - Special tokens can be skipped when decoding using `skip_special_tokens = True`.
- - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`.
- - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
- makes it easy to develop model-agnostic training and fine-tuning scripts.
- When possible, special tokens are already registered for provided pretrained models (for instance
-         [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
- `'</s>'`).
- Args:
- special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
- Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
- `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].
- Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
-                 assigns the index of the `unk_token` to them).
-             replace_additional_special_tokens (`bool`, *optional*, defaults to `True`):
- If `True`, the existing list of additional special tokens will be replaced by the list provided in
- `special_tokens_dict`. Otherwise, `self._additional_special_tokens` is just extended. In the former
- case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged
- as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the
- `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous
- `additional_special_tokens` are still added tokens, and will not be split by the model.
- Returns:
- `int`: Number of tokens added to the vocabulary.
- Examples:
- ```python
- # Let's see how to add a new classification token to GPT-2
- tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
- model = GPT2Model.from_pretrained("openai-community/gpt2")
- special_tokens_dict = {"cls_token": "<CLS>"}
- num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
- print("We have added", num_added_toks, "tokens")
-         # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
- model.resize_token_embeddings(len(tokenizer))
- assert tokenizer.cls_token == "<CLS>"
- ```"""
- if not special_tokens_dict:
- return 0
- added_tokens = []
- for key, value in special_tokens_dict.items():
- assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
- if self.verbose:
- logger.info(f"Assigning {value} to the {key} key of the tokenizer")
- if key == "additional_special_tokens":
- assert isinstance(value, (list, tuple)) and all(
- isinstance(t, (str, AddedToken)) for t in value
- ), f"Tokens {value} for key {key} should all be str or AddedToken instances"
- to_add = []
- for token in value:
- if isinstance(token, str):
- # for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this
- token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True)
- if not replace_additional_special_tokens and str(token) in self.additional_special_tokens:
- continue
- to_add.append(token)
- if replace_additional_special_tokens and len(to_add) > 0:
- setattr(self, key, list(to_add))
- else:
- self._additional_special_tokens.extend(to_add)
- added_tokens += to_add
- else:
- if not isinstance(value, (str, AddedToken)):
- raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance")
- if isinstance(value, (str)):
-                     # for legacy purposes we default to stripping
- value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True)
- if isinstance(value, AddedToken):
- setattr(self, key, value)
- if value not in added_tokens:
- added_tokens.append(value)
- # if we are adding tokens that were not part of the vocab, we ought to add them
- added_tokens = self.add_tokens(added_tokens, special_tokens=True)
- return added_tokens
- def add_tokens(
- self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
- ) -> int:
- """
- Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
-         it with indices starting from the length of the current vocabulary and will be isolated before the tokenization
- algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore
- not treated in the same way.
- Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix
- of the model so that its embedding matrix matches the tokenizer.
- In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
- Args:
- new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`):
- Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string
- token to let you personalize its behavior: whether this token should only match against a single word,
- whether this token should strip all potential whitespaces on the left side, whether this token should
- strip all potential whitespaces on the right side, etc.
- special_tokens (`bool`, *optional*, defaults to `False`):
-                 Can be used to specify if the token is a special token. This mostly changes the normalization behavior
- (special tokens like CLS or [MASK] are usually not lower-cased for instance).
- See details for `tokenizers.AddedToken` in HuggingFace tokenizers library.
- Returns:
- `int`: Number of tokens added to the vocabulary.
- Examples:
- ```python
- # Let's see how to increase the vocabulary of Bert model and tokenizer
- tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
- model = BertModel.from_pretrained("google-bert/bert-base-uncased")
- num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
- print("We have added", num_added_toks, "tokens")
-         # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
- model.resize_token_embeddings(len(tokenizer))
- ```"""
- if not new_tokens:
- return 0
- if not isinstance(new_tokens, (list, tuple)):
- new_tokens = [new_tokens]
- return self._add_tokens(new_tokens, special_tokens=special_tokens)
- def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
- raise NotImplementedError
- @property
- def bos_token(self) -> str:
- """
- `str`: Beginning of sentence token. Log an error if used while not having been set.
- """
- if self._bos_token is None:
- if self.verbose:
- logger.error("Using bos_token, but it is not set yet.")
- return None
- return str(self._bos_token)
- @property
- def eos_token(self) -> str:
- """
- `str`: End of sentence token. Log an error if used while not having been set.
- """
- if self._eos_token is None:
- if self.verbose:
- logger.error("Using eos_token, but it is not set yet.")
- return None
- return str(self._eos_token)
- @property
- def unk_token(self) -> str:
- """
- `str`: Unknown token. Log an error if used while not having been set.
- """
- if self._unk_token is None:
- if self.verbose:
- logger.error("Using unk_token, but it is not set yet.")
- return None
- return str(self._unk_token)
- @property
- def sep_token(self) -> str:
- """
- `str`: Separation token, to separate context and query in an input sequence. Log an error if used while not
- having been set.
- """
- if self._sep_token is None:
- if self.verbose:
- logger.error("Using sep_token, but it is not set yet.")
- return None
- return str(self._sep_token)
- @property
- def pad_token(self) -> str:
- """
- `str`: Padding token. Log an error if used while not having been set.
- """
- if self._pad_token is None:
- if self.verbose:
- logger.error("Using pad_token, but it is not set yet.")
- return None
- return str(self._pad_token)
- @property
- def cls_token(self) -> str:
- """
- `str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full
- depth of the model. Log an error if used while not having been set.
- """
- if self._cls_token is None:
- if self.verbose:
- logger.error("Using cls_token, but it is not set yet.")
- return None
- return str(self._cls_token)
- @property
- def mask_token(self) -> str:
- """
- `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
- having been set.
- """
- if self._mask_token is None:
- if self.verbose:
- logger.error("Using mask_token, but it is not set yet.")
- return None
- return str(self._mask_token)
- @property
- def additional_special_tokens(self) -> List[str]:
- """
- `List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been
- set.
- """
- if self._additional_special_tokens is None:
- if self.verbose:
- logger.error("Using additional_special_tokens, but it is not set yet.")
- return None
- return [str(tok) for tok in self._additional_special_tokens]
- @bos_token.setter
- def bos_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the BOS token")
- self._bos_token = value
- @eos_token.setter
- def eos_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the EOS token")
- self._eos_token = value
- @unk_token.setter
- def unk_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the UNK token")
- self._unk_token = value
- @sep_token.setter
- def sep_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the SEP token")
- self._sep_token = value
- @pad_token.setter
- def pad_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the PAD token")
- self._pad_token = value
- @cls_token.setter
- def cls_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the CLS token")
- self._cls_token = value
- @mask_token.setter
- def mask_token(self, value):
- if not isinstance(value, (str, AddedToken)) and value is not None:
- raise ValueError("Cannot set a non-string value as the MASK token")
- self._mask_token = value
- @additional_special_tokens.setter
- def additional_special_tokens(self, value):
- self._additional_special_tokens = value if value is not None else None
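- # A minimal usage sketch (illustrative, not part of the class): the setters above accept a `str` or an
- # `AddedToken`, e.g. reusing the EOS token as a padding token for a checkpoint that ships without one:
- #     tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
- #     tokenizer.pad_token = tokenizer.eos_token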
- @property
- def bos_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns `None` if the token has not
- been set.
- """
- if self._bos_token is None:
- return None
- return self.convert_tokens_to_ids(self.bos_token)
- @property
- def eos_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
- set.
- """
- if self._eos_token is None:
- return None
- return self.convert_tokens_to_ids(self.eos_token)
- @property
- def unk_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the unknown token in the vocabulary. Returns `None` if the token has not been set.
- """
- if self._unk_token is None:
- return None
- return self.convert_tokens_to_ids(self.unk_token)
- @property
- def sep_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
- sequence. Returns `None` if the token has not been set.
- """
- if self._sep_token is None:
- return None
- return self.convert_tokens_to_ids(self.sep_token)
- @property
- def pad_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
- """
- if self._pad_token is None:
- return None
- return self.convert_tokens_to_ids(self.pad_token)
- @property
- def pad_token_type_id(self) -> int:
- """
- `int`: Id of the padding token type in the vocabulary.
- """
- return self._pad_token_type_id
- @property
- def cls_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence
- leveraging self-attention along the full depth of the model.
- Returns `None` if the token has not been set.
- """
- if self._cls_token is None:
- return None
- return self.convert_tokens_to_ids(self.cls_token)
- @property
- def mask_token_id(self) -> Optional[int]:
- """
- `Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
- modeling. Returns `None` if the token has not been set.
- """
- if self._mask_token is None:
- return None
- return self.convert_tokens_to_ids(self.mask_token)
- @property
- def additional_special_tokens_ids(self) -> List[int]:
- """
- `List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having
- been set.
- """
- return self.convert_tokens_to_ids(self.additional_special_tokens)
- @bos_token_id.setter
- def bos_token_id(self, value):
- self._bos_token = self.convert_ids_to_tokens(value) if value is not None else None
- @eos_token_id.setter
- def eos_token_id(self, value):
- self._eos_token = self.convert_ids_to_tokens(value) if value is not None else None
- @unk_token_id.setter
- def unk_token_id(self, value):
- self._unk_token = self.convert_ids_to_tokens(value) if value is not None else None
- @sep_token_id.setter
- def sep_token_id(self, value):
- self._sep_token = self.convert_ids_to_tokens(value) if value is not None else None
- @pad_token_id.setter
- def pad_token_id(self, value):
- self._pad_token = self.convert_ids_to_tokens(value) if value is not None else None
- @cls_token_id.setter
- def cls_token_id(self, value):
- self._cls_token = self.convert_ids_to_tokens(value) if value is not None else None
- @mask_token_id.setter
- def mask_token_id(self, value):
- self._mask_token = self.convert_ids_to_tokens(value) if value is not None else None
- @additional_special_tokens_ids.setter
- def additional_special_tokens_ids(self, values):
- self._additional_special_tokens = [self.convert_ids_to_tokens(value) for value in values]
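- # Note (illustrative): the id setters above resolve an id back to its token string, so
- # `tokenizer.pad_token_id = 0` behaves like `tokenizer.pad_token = tokenizer.convert_ids_to_tokens(0)`.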
- @property
- def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
- """
- `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
- `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
- Convert potential tokens of `tokenizers.AddedToken` type to string.
- """
- set_attr = {}
- for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
- attr_value = getattr(self, attr)
- if attr_value:
- set_attr[attr] = attr_value
- return set_attr
- @property
- def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
- """
- `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping
- special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).
- Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
- special tokens are tokenized.
- """
- set_attr = {}
- for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
- attr_value = getattr(self, "_" + attr)
- if attr_value:
- set_attr[attr] = attr_value
- return set_attr
- @property
- def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
- """
- `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.); their order has
- nothing to do with the index of each token. If you want to know the correct indices, check
- `self.added_tokens_encoder`. No ordering can be guaranteed since the keys are `AddedToken` objects rather than strings.
- Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
- special tokens are tokenized.
- """
- all_tokens = []
- seen = set()
- for value in self.special_tokens_map_extended.values():
- if isinstance(value, (list, tuple)):
- tokens_to_add = [token for token in value if str(token) not in seen]
- else:
- tokens_to_add = [value] if str(value) not in seen else []
- seen.update(map(str, tokens_to_add))
- all_tokens.extend(tokens_to_add)
- return all_tokens
- @property
- def all_special_tokens(self) -> List[str]:
- """
- `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.).
- Convert tokens of `tokenizers.AddedToken` type to string.
- """
- all_toks = [str(s) for s in self.all_special_tokens_extended]
- return all_toks
- @property
- def all_special_ids(self) -> List[int]:
- """
- `List[int]`: List of the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
- """
- all_toks = self.all_special_tokens
- all_ids = self.convert_tokens_to_ids(all_toks)
- return all_ids
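- # Illustrative sketch (values assume a BERT-style tokenizer; the order is not guaranteed):
- #     tokenizer.special_tokens_map  -> {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", ...}
- #     tokenizer.all_special_tokens  -> ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]
- #     tokenizer.all_special_ids     -> [100, 102, 0, 101, 103]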
- ENCODE_KWARGS_DOCSTRING = r"""
- add_special_tokens (`bool`, *optional*, defaults to `True`):
- Whether or not to add special tokens when encoding the sequences. This will use the underlying
- `PreTrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are
- automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens
- automatically.
- padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
- Activates and controls padding. Accepts the following values:
- - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
- sequence is provided).
- - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
- acceptable input length for the model if that argument is not provided.
- - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
- lengths).
- truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
- Activates and controls truncation. Accepts the following values:
- - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
- to the maximum acceptable input length for the model if that argument is not provided. This will
- truncate token by token, removing a token from the longest sequence in the pair if a pair of
- sequences (or a batch of pairs) is provided.
- - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will only
- truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will only
- truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
- greater than the model maximum admissible input size).
- max_length (`int`, *optional*):
- Controls the maximum length to use by one of the truncation/padding parameters.
- If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
- is required by one of the truncation/padding parameters. If the model has no specific maximum input
- length (like XLNet) truncation/padding to a maximum length will be deactivated.
- stride (`int`, *optional*, defaults to 0):
- If set to a number along with `max_length`, the overflowing tokens returned when
- `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
- returned to provide some overlap between truncated and overflowing sequences. The value of this
- argument defines the number of overlapping tokens.
- is_split_into_words (`bool`, *optional*, defaults to `False`):
- Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
- tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
- which it will tokenize. This is useful for NER or token classification.
- pad_to_multiple_of (`int`, *optional*):
- If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated.
- This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
- `>= 7.5` (Volta).
- padding_side (`str`, *optional*):
- The side on which the model should have padding applied. Should be selected between ['right', 'left'].
- Default value is picked from the class attribute of the same name.
- return_tensors (`str` or [`~utils.TensorType`], *optional*):
- If set, will return tensors instead of list of python integers. Acceptable values are:
- - `'tf'`: Return TensorFlow `tf.constant` objects.
- - `'pt'`: Return PyTorch `torch.Tensor` objects.
- - `'np'`: Return Numpy `np.ndarray` objects.
- """
- ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
- return_token_type_ids (`bool`, *optional*):
- Whether to return token type IDs. If left to the default, will return the token type IDs according to
- the specific tokenizer's default, defined by the `return_outputs` attribute.
- [What are token type IDs?](../glossary#token-type-ids)
- return_attention_mask (`bool`, *optional*):
- Whether to return the attention mask. If left to the default, will return the attention mask according
- to the specific tokenizer's default, defined by the `return_outputs` attribute.
- [What are attention masks?](../glossary#attention-mask)
- return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
- of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
- of returning overflowing tokens.
- return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
- Whether or not to return special tokens mask information.
- return_offsets_mapping (`bool`, *optional*, defaults to `False`):
- Whether or not to return `(char_start, char_end)` for each token.
- This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using a
- Python (slow) tokenizer, this method will raise `NotImplementedError`.
- return_length (`bool`, *optional*, defaults to `False`):
- Whether or not to return the lengths of the encoded inputs.
- verbose (`bool`, *optional*, defaults to `True`):
- Whether or not to print more information and warnings.
- **kwargs: passed to the `self.tokenize()` method
- Return:
- [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- - **input_ids** -- List of token ids to be fed to a model.
- [What are input IDs?](../glossary#input-ids)
- - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
- if *"token_type_ids"* is in `self.model_input_names`).
- [What are token type IDs?](../glossary#token-type-ids)
- - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
- `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
- [What are attention masks?](../glossary#attention-mask)
- - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
- `return_overflowing_tokens=True`).
- - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
- `return_overflowing_tokens=True`).
- - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
- regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
- - **length** -- The length of the inputs (when `return_length=True`)
- """
- INIT_TOKENIZER_DOCSTRING = r"""
- Class attributes (overridden by derived classes)
- - **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
- vocabulary file required by the model, and as associated values, the filename for saving the associated file
- (string).
- - **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
- high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
- low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
- associated pretrained vocabulary file.
- - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
- - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
- Should be `'right'` or `'left'`.
- - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
- applied. Should be `'right'` or `'left'`.
- Args:
- model_max_length (`int`, *optional*):
- The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
- loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
- value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
- default to VERY_LARGE_INTEGER (`int(1e30)`).
- padding_side (`str`, *optional*):
- The side on which the model should have padding applied. Should be selected between ['right', 'left'].
- Default value is picked from the class attribute of the same name.
- truncation_side (`str`, *optional*):
- The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
- Default value is picked from the class attribute of the same name.
- chat_template (`str`, *optional*):
- A Jinja template string that will be used to format lists of chat messages. See
- https://huggingface.co/docs/transformers/chat_templating for a full description.
- model_input_names (`List[string]`, *optional*):
- The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
- `"attention_mask"`). Default value is picked from the class attribute of the same name.
- bos_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
- `self.bos_token_id`.
- eos_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing the end of a sentence. Will be associated to `self.eos_token` and
- `self.eos_token_id`.
- unk_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
- `self.unk_token_id`.
- sep_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token separating two different sentences in the same input (used by BERT for instance). Will be
- associated to `self.sep_token` and `self.sep_token_id`.
- pad_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
- attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
- cls_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing the class of the input (used by BERT for instance). Will be associated to
- `self.cls_token` and `self.cls_token_id`.
- mask_token (`str` or `tokenizers.AddedToken`, *optional*):
- A special token representing a masked token (used by masked-language modeling pretraining objectives, like
- BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
- additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
- A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding with
- `skip_special_tokens=True`. If they are not part of the vocabulary, they will be added at the end
- of the vocabulary.
- clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
- Whether or not the model should clean up the spaces that were added when splitting the input text during the
- tokenization process.
- split_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not the special tokens should be split during the tokenization process. Passing this argument
- affects the internal state of the tokenizer. The default behavior is to not split special tokens. This
- means that if `<s>` is the `bos_token`, then `tokenizer.tokenize("<s>") = ['<s>']`. Otherwise, if
- `split_special_tokens=True`, then `tokenizer.tokenize("<s>")` will give `['<', 's', '>']`.
- """
- @add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
- class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
- """
- Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].
- Handles shared (mostly boilerplate) methods for those two classes.
- """
- vocab_files_names: Dict[str, str] = {}
- pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
- _auto_class: Optional[str] = None
- # first name has to correspond to main model input name
- # to make sure `tokenizer.pad(...)` works correctly
- model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"]
- padding_side: str = "right"
- truncation_side: str = "right"
- slow_tokenizer_class = None
- def __init__(self, **kwargs):
- # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
- self.init_inputs = ()
- for key in kwargs:
- if hasattr(self, key) and callable(getattr(self, key)):
- raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}")
- self.init_kwargs = copy.deepcopy(kwargs)
- self.name_or_path = kwargs.pop("name_or_path", "")
- self._processor_class = kwargs.pop("processor_class", None)
- # For backward compatibility we fallback to set model_max_length from max_len if provided
- model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
- self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
- # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs, it
- # is changed.
- self.padding_side = kwargs.pop("padding_side", self.padding_side)
- if self.padding_side not in ["right", "left"]:
- raise ValueError(
- f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
- )
- self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
- if self.truncation_side not in ["right", "left"]:
- raise ValueError(
- f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
- )
- self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
- # By default, cleaning tokenization spaces for both fast and slow tokenizers
- self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)
- # By default, do not split special tokens for both fast and slow tokenizers
- self.split_special_tokens = kwargs.pop("split_special_tokens", False)
- self.deprecation_warnings = {}  # Used to store whether we have already warned about a deprecation (to avoid over-logging).
- self._in_target_context_manager = False
- # Stores a Jinja template that formats chat histories into tokenizable strings
- self.chat_template = kwargs.pop("chat_template", None)
- if isinstance(self.chat_template, (list, tuple)):
- # Chat templates are stored as lists of dicts with fixed key names,
- # we reconstruct that into a single dict while loading them.
- self.chat_template = {template["name"]: template["template"] for template in self.chat_template}
- super().__init__(**kwargs)
- @property
- def max_len_single_sentence(self) -> int:
- """
- `int`: The maximum length of a sentence that can be fed to the model.
- """
- return self.model_max_length - self.num_special_tokens_to_add(pair=False)
- @property
- def max_len_sentences_pair(self) -> int:
- """
- `int`: The maximum combined length of a pair of sentences that can be fed to the model.
- """
- return self.model_max_length - self.num_special_tokens_to_add(pair=True)
- @max_len_single_sentence.setter
- def max_len_single_sentence(self, value) -> int:
- # For backward compatibility, allow attempts to set 'max_len_single_sentence'.
- if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
- if not self.deprecation_warnings.get("max_len_single_sentence", False):
- logger.warning(
- "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
- )
- self.deprecation_warnings["max_len_single_sentence"] = True
- else:
- raise ValueError(
- "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
- )
- @max_len_sentences_pair.setter
- def max_len_sentences_pair(self, value) -> int:
- # For backward compatibility, allow attempts to set 'max_len_sentences_pair'.
- if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
- if not self.deprecation_warnings.get("max_len_sentences_pair", False):
- logger.warning(
- "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up."
- )
- self.deprecation_warnings["max_len_sentences_pair"] = True
- else:
- raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
- def _set_processor_class(self, processor_class: str):
- """Sets processor class as an attribute."""
- self._processor_class = processor_class
- @property
- def added_tokens_decoder(self) -> Dict[int, AddedToken]:
- raise NotImplementedError()
- def __repr__(self) -> str:
- added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()])
- return (
- f"{self.__class__.__name__}(name_or_path='{self.name_or_path}',"
- f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast},"
- f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}',"
- f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}), "
- " added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}"
- )
- def __len__(self) -> int:
- raise NotImplementedError()
- def get_vocab(self) -> Dict[str, int]:
- """
- Returns the vocabulary as a dictionary of token to index.
- `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
- vocab.
- Returns:
- `Dict[str, int]`: The vocabulary.
- """
- raise NotImplementedError()
- def apply_chat_template(
- self,
- conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]]],
- tools: Optional[List[Dict]] = None,
- documents: Optional[List[Dict[str, str]]] = None,
- chat_template: Optional[str] = None,
- add_generation_prompt: bool = False,
- continue_final_message: bool = False,
- tokenize: bool = True,
- padding: bool = False,
- truncation: bool = False,
- max_length: Optional[int] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_dict: bool = False,
- return_assistant_tokens_mask: bool = False,
- tokenizer_kwargs: Optional[Dict[str, Any]] = None,
- **kwargs,
- ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
- """
- Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
- ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to
- determine the format and control tokens to use when converting.
- Args:
- conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts
- with "role" and "content" keys, representing the chat history so far.
- tools (`List[Dict]`, *optional*):
- A list of tools (callable functions) that will be accessible to the model. If the template does not
- support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
- giving the name, description and argument types for the tool. See our
- [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
- for more information.
- documents (`List[Dict[str, str]]`, *optional*):
- A list of dicts representing documents that will be accessible to the model if it is performing RAG
- (retrieval-augmented generation). If the template does not support RAG, this argument will have no
- effect. We recommend that each document should be a dict containing "title" and "text" keys. Please
- see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG)
- for examples of passing documents with chat templates.
- chat_template (`str`, *optional*):
- A Jinja template to use for this conversion. It is usually not necessary to pass anything to this
- argument, as the model's template will be used by default.
- add_generation_prompt (bool, *optional*):
- If this is set, a prompt with the token(s) that indicate
- the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model.
- Note that this argument will be passed to the chat template, and so it must be supported in the
- template for this argument to have any effect.
- continue_final_message (bool, *optional*):
- If this is set, the chat will be formatted so that the final
- message in the chat is open-ended, without any EOS tokens. The model will continue this message
- rather than starting a new one. This allows you to "prefill" part of
- the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
- tokenize (`bool`, defaults to `True`):
- Whether to tokenize the output. If `False`, the output will be a string.
- padding (`bool`, defaults to `False`):
- Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.
- truncation (`bool`, defaults to `False`):
- Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
- max_length (`int`, *optional*):
- Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
- not specified, the tokenizer's `max_length` attribute will be used as a default.
- return_tensors (`str` or [`~utils.TensorType`], *optional*):
- If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
- values are:
- - `'tf'`: Return TensorFlow `tf.Tensor` objects.
- - `'pt'`: Return PyTorch `torch.Tensor` objects.
- - `'np'`: Return NumPy `np.ndarray` objects.
- - `'jax'`: Return JAX `jnp.ndarray` objects.
- return_dict (`bool`, defaults to `False`):
- Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
- tokenizer_kwargs (`Dict[str, Any]`, *optional*): Additional kwargs to pass to the tokenizer.
- return_assistant_tokens_mask (`bool`, defaults to `False`):
- Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant,
- the mask will contain 1. For user and system tokens, the mask will contain 0.
- This functionality is only available for chat templates that support it via the `{% generation %}` keyword.
- **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template.
- Returns:
- `Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This
- output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is
- set, will return a dict of tokenizer outputs instead.
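- Examples (a minimal sketch; the exact formatted string depends on the model's chat template):
- ```python
- messages = [
-     {"role": "system", "content": "You are a helpful assistant."},
-     {"role": "user", "content": "Hello!"},
- ]
- # Formatted prompt as a string
- prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
- # Token ids ready to pass to `model.generate()`
- input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
- ```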
- """
- if return_dict and not tokenize:
- raise ValueError(
- "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
- "of tokenizer outputs to return."
- )
- if return_assistant_tokens_mask and not return_dict:
- raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`")
- if tokenizer_kwargs is None:
- tokenizer_kwargs = {}
- chat_template = self.get_chat_template(chat_template, tools)
- if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template):
- logger.warning_once(
- "return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword."
- )
- # Compilation function uses a cache to avoid recompiling the same template
- compiled_template = _compile_jinja_template(chat_template)
- if isinstance(conversation, (list, tuple)) and (
- isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages")
- ):
- conversations = conversation
- is_batched = True
- else:
- conversations = [conversation]
- is_batched = False
- if continue_final_message:
- if add_generation_prompt:
- raise ValueError(
- "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead."
- )
- if return_assistant_tokens_mask:
- raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.")
- # We accept either JSON schemas or functions for tools. If we get functions, we convert them to schemas
- if tools is not None:
- tool_schemas = []
- for tool in tools:
- if isinstance(tool, dict):
- tool_schemas.append(tool)
- elif isfunction(tool):
- tool_schemas.append(get_json_schema(tool))
- else:
- raise ValueError(
- "Tools should either be a JSON schema, or a callable function with type hints "
- "and a docstring suitable for auto-conversion to a schema."
- )
- else:
- tool_schemas = None
- if documents is not None:
- for document in documents:
- if not isinstance(document, dict):
- raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!")
- rendered = []
- all_generation_indices = []
- template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present
- for chat in conversations:
- if hasattr(chat, "messages"):
- # Indicates it's a Conversation object
- chat = chat.messages
- if return_assistant_tokens_mask:
- rendered_chat, generation_indices = _render_with_assistant_indices(
- compiled_template=compiled_template,
- messages=chat,
- tools=tool_schemas,
- documents=documents,
- add_generation_prompt=add_generation_prompt,
- **template_kwargs,
- )
- all_generation_indices.append(generation_indices)
- else:
- rendered_chat = compiled_template.render(
- messages=chat,
- tools=tool_schemas,
- documents=documents,
- add_generation_prompt=add_generation_prompt,
- **template_kwargs,
- )
- if continue_final_message:
- final_message = chat[-1]["content"]
- if isinstance(final_message, (list, tuple)):
- final_message = final_message[-1]["text"]
- final_message = final_message.strip()
- rendered_chat = rendered_chat[: rendered_chat.rindex(final_message) + len(final_message)].rstrip()
- rendered.append(rendered_chat)
- if not is_batched:
- rendered = rendered[0]
- if tokenize:
- out = self(
- rendered,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- add_special_tokens=False,
- return_tensors=return_tensors,
- **tokenizer_kwargs,
- )
- if return_dict:
- if return_assistant_tokens_mask:
- assistant_masks = []
- if is_batched or return_tensors:
- input_ids = out["input_ids"]
- else:
- input_ids = [out["input_ids"]]
- for i in range(len(input_ids)):
- current_mask = [0] * len(input_ids[i])
- for assistant_start_char, assistant_end_char in all_generation_indices[i]:
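- # `char_to_token` maps the character offsets of the rendered assistant span back to token
- # indices in the encoded output, so the mask is set over exactly those tokens.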
- start_token = out.char_to_token(i, assistant_start_char)
- end_token = out.char_to_token(i, assistant_end_char - 1)
- if start_token is None:
- # start_token is out of bounds, possibly due to truncation.
- break
- for token_id in range(start_token, end_token + 1 if end_token else len(input_ids)):
- current_mask[token_id] = 1
- assistant_masks.append(current_mask)
- out["assistant_masks"] = assistant_masks if is_batched else assistant_masks[0]
- return out
- else:
- return out["input_ids"]
- else:
- return rendered
- def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[List[Dict]] = None) -> str:
- """
- Retrieve the chat template string used for tokenizing chat messages. This template is used
- internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat
- template for better generation tracking.
- Args:
- chat_template (`str`, *optional*):
- A Jinja template or the name of a template to use for this conversion.
- It is usually not necessary to pass anything to this argument,
- as the model's template will be used by default.
- tools (`List[Dict]`, *optional*):
- A list of tools (callable functions) that will be accessible to the model. If the template does not
- support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
- giving the name, description and argument types for the tool. See our
- [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
- for more information.
- Returns:
- `str`: The chat template string.
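- Example (a minimal sketch; assumes the tokenizer has a chat template set):
- ```python
- template = tokenizer.get_chat_template()
- # When several templates are stored, a named one can be requested, e.g. the tool-use template:
- tool_template = tokenizer.get_chat_template("tool_use")
- ```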
- """
- # First, handle the cases when the model has a dict of multiple templates
- if isinstance(self.chat_template, dict):
- template_dict = self.chat_template
- if chat_template is not None and chat_template in template_dict:
- # The user can pass the name of a template to the chat template argument instead of an entire template
- chat_template = template_dict[chat_template]
- elif chat_template is None:
- if tools is not None and "tool_use" in template_dict:
- chat_template = template_dict["tool_use"]
- elif "default" in template_dict:
- chat_template = template_dict["default"]
- else:
- raise ValueError(
- "This model has multiple chat templates with no default specified! Please either pass a chat "
- "template or the name of the template you wish to use to the `chat_template` argument. Available "
- f"template names are {sorted(template_dict.keys())}."
- )
- elif chat_template is None:
- # These are the cases when the model has a single template
- # priority: `chat_template` argument > `tokenizer.chat_template`
- if self.chat_template is not None:
- chat_template = self.chat_template
- else:
- raise ValueError(
- "Cannot use chat template functions because tokenizer.chat_template is not set and no template "
- "argument was passed! For information about writing templates and setting the "
- "tokenizer.chat_template attribute, please see the documentation at "
- "https://huggingface.co/docs/transformers/main/en/chat_templating"
- )
- return chat_template
- @classmethod
- def from_pretrained(
- cls,
- pretrained_model_name_or_path: Union[str, os.PathLike],
- *init_inputs,
- cache_dir: Optional[Union[str, os.PathLike]] = None,
- force_download: bool = False,
- local_files_only: bool = False,
- token: Optional[Union[str, bool]] = None,
- revision: str = "main",
- trust_remote_code=False,
- **kwargs,
- ):
- r"""
- Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
- tokenizer.
- Args:
- pretrained_model_name_or_path (`str` or `os.PathLike`):
- Can be either:
- - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
- - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
- using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
- `./my_model_directory/`.
- - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
- file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
- `./my_model_directory/vocab.txt`.
- cache_dir (`str` or `os.PathLike`, *optional*):
- Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
- standard cache should not be used.
- force_download (`bool`, *optional*, defaults to `False`):
- Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they
- exist.
- resume_download:
- Deprecated and ignored. All downloads are now resumed by default when possible.
- Will be removed in v5 of Transformers.
- proxies (`Dict[str, str]`, *optional*):
- A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
- 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
- token (`str` or *bool*, *optional*):
- The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
- when running `huggingface-cli login` (stored in `~/.huggingface`).
- local_files_only (`bool`, *optional*, defaults to `False`):
- Whether or not to only rely on local files and not to attempt to download any files.
- revision (`str`, *optional*, defaults to `"main"`):
- The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
- git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
- identifier allowed by git.
- subfolder (`str`, *optional*):
- In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
- facebook/rag-token-base), specify it here.
- inputs (additional positional arguments, *optional*):
- Will be passed along to the Tokenizer `__init__` method.
- trust_remote_code (`bool`, *optional*, defaults to `False`):
- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
- should only be set to `True` for repositories you trust and in which you have read the code, as it will
- execute code present on the Hub on your local machine.
- kwargs (additional keyword arguments, *optional*):
- Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
- `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
- `additional_special_tokens`. See parameters in the `__init__` for more details.
- <Tip>
- Passing `token=True` is required when you want to use a private model.
- </Tip>
- Examples:
- ```python
- # We can't directly instantiate the base class *PreTrainedTokenizerBase*, so let's show our examples on a derived class: BertTokenizer
- # Download vocabulary from huggingface.co and cache.
- tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
- # Download vocabulary from huggingface.co (user-uploaded) and cache.
- tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
- # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
- tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")
- # If the tokenizer uses a single vocabulary file, you can point directly to this file
- tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")
- # You can link tokens to special vocabulary when instantiating
- tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>")
- # You should be sure '<unk>' is in the vocabulary when doing that.
- # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
- assert tokenizer.unk_token == "<unk>"
- ```"""
- resume_download = kwargs.pop("resume_download", None)
- proxies = kwargs.pop("proxies", None)
- use_auth_token = kwargs.pop("use_auth_token", None)
- subfolder = kwargs.pop("subfolder", None)
- from_pipeline = kwargs.pop("_from_pipeline", None)
- from_auto_class = kwargs.pop("_from_auto", False)
- commit_hash = kwargs.pop("_commit_hash", None)
- gguf_file = kwargs.get("gguf_file", None)
- if use_auth_token is not None:
- warnings.warn(
- "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
- FutureWarning,
- )
- if token is not None:
- raise ValueError(
- "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
- )
- token = use_auth_token
- user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__}
- if from_pipeline is not None:
- user_agent["using_pipeline"] = from_pipeline
- if is_offline_mode() and not local_files_only:
- logger.info("Offline mode: forcing local_files_only=True")
- local_files_only = True
- pretrained_model_name_or_path = str(pretrained_model_name_or_path)
- vocab_files = {}
- init_configuration = {}
- is_local = os.path.isdir(pretrained_model_name_or_path)
- single_file_id = None
- if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
- if len(cls.vocab_files_names) > 1 and not gguf_file:
- raise ValueError(
- f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
- "supported for this tokenizer. Use a model identifier or the path to a directory instead."
- )
- warnings.warn(
- f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
- "won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
- FutureWarning,
- )
- file_id = list(cls.vocab_files_names.keys())[0]
- vocab_files[file_id] = pretrained_model_name_or_path
- single_file_id = file_id
- else:
- if gguf_file:
- vocab_files["vocab_file"] = gguf_file
- else:
- # At this point pretrained_model_name_or_path is either a directory or a model identifier name
- additional_files_names = {
- "added_tokens_file": ADDED_TOKENS_FILE, # kept only for legacy
- "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, # kept only for legacy
- "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
- # tokenizer_file is used to initialize a slow tokenizer from a fast one. Properly copy the `AddedToken`s instead of adding them in a random order
- "tokenizer_file": FULL_TOKENIZER_FILE,
- }
- vocab_files = {**cls.vocab_files_names, **additional_files_names}
- if "tokenizer_file" in vocab_files:
- # Try to get the tokenizer config to see if there are versioned tokenizer files.
- fast_tokenizer_file = FULL_TOKENIZER_FILE
- resolved_config_file = cached_file(
- pretrained_model_name_or_path,
- TOKENIZER_CONFIG_FILE,
- cache_dir=cache_dir,
- force_download=force_download,
- resume_download=resume_download,
- proxies=proxies,
- token=token,
- revision=revision,
- local_files_only=local_files_only,
- subfolder=subfolder,
- user_agent=user_agent,
- _raise_exceptions_for_gated_repo=False,
- _raise_exceptions_for_missing_entries=False,
- _raise_exceptions_for_connection_errors=False,
- _commit_hash=commit_hash,
- )
- commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
- if resolved_config_file is not None:
- with open(resolved_config_file, encoding="utf-8") as reader:
- tokenizer_config = json.load(reader)
- if "fast_tokenizer_files" in tokenizer_config:
- fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"])
- vocab_files["tokenizer_file"] = fast_tokenizer_file
- # Get files from url, cache, or disk depending on the case
- resolved_vocab_files = {}
- unresolved_files = []
- for file_id, file_path in vocab_files.items():
- if file_path is None:
- resolved_vocab_files[file_id] = None
- elif single_file_id == file_id:
- if os.path.isfile(file_path):
- resolved_vocab_files[file_id] = file_path
- elif is_remote_url(file_path):
- resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies)
- else:
- resolved_vocab_files[file_id] = cached_file(
- pretrained_model_name_or_path,
- file_path,
- cache_dir=cache_dir,
- force_download=force_download,
- proxies=proxies,
- resume_download=resume_download,
- local_files_only=local_files_only,
- token=token,
- user_agent=user_agent,
- revision=revision,
- subfolder=subfolder,
- _raise_exceptions_for_gated_repo=False,
- _raise_exceptions_for_missing_entries=False,
- _raise_exceptions_for_connection_errors=False,
- _commit_hash=commit_hash,
- )
- commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)
- if len(unresolved_files) > 0:
- logger.info(
- f"Can't load following files from cache: {unresolved_files} and cannot check if these "
- "files are necessary for the tokenizer to operate."
- )
- # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
- # loaded directly from the GGUF file.
- if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file:
- raise EnvironmentError(
- f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
- "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
- f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
- f"containing all relevant files for a {cls.__name__} tokenizer."
- )
- for file_id, file_path in vocab_files.items():
- if file_id not in resolved_vocab_files:
- continue
- if is_local:
- logger.info(f"loading file {file_path}")
- else:
- logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
- return cls._from_pretrained(
- resolved_vocab_files,
- pretrained_model_name_or_path,
- init_configuration,
- *init_inputs,
- token=token,
- cache_dir=cache_dir,
- local_files_only=local_files_only,
- _commit_hash=commit_hash,
- _is_local=is_local,
- trust_remote_code=trust_remote_code,
- **kwargs,
- )
- @classmethod
- def _from_pretrained(
- cls,
- resolved_vocab_files,
- pretrained_model_name_or_path,
- init_configuration,
- *init_inputs,
- token=None,
- cache_dir=None,
- local_files_only=False,
- _commit_hash=None,
- _is_local=False,
- trust_remote_code=False,
- **kwargs,
- ):
- # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
- # file or if `from_slow` is set to True.
- from_slow = kwargs.get("from_slow", False)
- gguf_file = kwargs.get("gguf_file", None)
- has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None
- # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
- # loaded directly from the GGUF file.
- if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file:
- slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
- copy.deepcopy(resolved_vocab_files),
- pretrained_model_name_or_path,
- copy.deepcopy(init_configuration),
- *init_inputs,
- token=token,
- cache_dir=cache_dir,
- local_files_only=local_files_only,
- _commit_hash=_commit_hash,
- **(copy.deepcopy(kwargs)),
- )
- else:
- slow_tokenizer = None
- # Prepare tokenizer initialization kwargs
- # Did we save some inputs and kwargs to reload?
- tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
- if tokenizer_config_file is not None:
- with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
- init_kwargs = json.load(tokenizer_config_handle)
- # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
- config_tokenizer_class = init_kwargs.get("tokenizer_class")
- init_kwargs.pop("tokenizer_class", None)
- if not has_tokenizer_file:
- init_kwargs.pop("tokenizer_file", None)
- saved_init_inputs = init_kwargs.pop("init_inputs", ())
- if not init_inputs:
- init_inputs = saved_init_inputs
- else:
- config_tokenizer_class = None
- init_kwargs = init_configuration
- if not _is_local:
- if "auto_map" in init_kwargs:
- # For backward compatibility with the old format.
- if isinstance(init_kwargs["auto_map"], (tuple, list)):
- init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]}
- init_kwargs["auto_map"] = add_model_info_to_auto_map(
- init_kwargs["auto_map"], pretrained_model_name_or_path
- )
- if "custom_pipelines" in init_kwargs:
- init_kwargs["custom_pipelines"] = add_model_info_to_custom_pipelines(
- init_kwargs["custom_pipelines"], pretrained_model_name_or_path
- )
- if config_tokenizer_class is None:
- # Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo.
- # If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with
- # AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain.
- # Maybe we can just remove this entirely?
- from .models.auto.configuration_auto import AutoConfig # tests_ignore
- # Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
- try:
- config = AutoConfig.from_pretrained(
- pretrained_model_name_or_path,
- token=token,
- cache_dir=cache_dir,
- local_files_only=local_files_only,
- trust_remote_code=trust_remote_code,
- _commit_hash=_commit_hash,
- )
- config_tokenizer_class = config.tokenizer_class
- except (OSError, ValueError, KeyError):
- # skip if an error occurred.
- config = None
- if config_tokenizer_class is None:
- # Third attempt. If we have not yet found the original type of the tokenizer we are loading,
- # see if we can infer it from the type of the configuration file.
- from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore
- if hasattr(config, "model_type"):
- model_type = config.model_type
- else:
- # Fallback: use pattern matching on the string.
- model_type = None
- for pattern in TOKENIZER_MAPPING_NAMES.keys():
- if pattern in str(pretrained_model_name_or_path):
- model_type = pattern
- break
- if model_type is not None:
- config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get(
- model_type, (None, None)
- )
- if config_tokenizer_class is None:
- config_tokenizer_class = config_tokenizer_class_fast
- if config_tokenizer_class is not None:
- if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
- logger.warning(
- "The tokenizer class you load from this checkpoint is not the same type as the class this"
- " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you"
- f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called"
- f" from is '{cls.__name__}'."
- )
- # Update with newly provided kwargs
- init_kwargs.update(kwargs)
- # Merge resolved_vocab_files arguments in init_kwargs.
- added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
- special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
- for args_name, file_path in resolved_vocab_files.items():
- if args_name not in init_kwargs:
- init_kwargs[args_name] = file_path
- tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None)
- if slow_tokenizer is not None:
- init_kwargs["__slow_tokenizer"] = slow_tokenizer
- init_kwargs["name_or_path"] = pretrained_model_name_or_path
- #### Handle tokenizer serialization of added and special tokens
- added_tokens_decoder: Dict[int, AddedToken] = {}
- added_tokens_map: Dict[str, AddedToken] = {}
- # if we have info on the slow added tokens
- if "added_tokens_decoder" in init_kwargs:
- for idx, token in init_kwargs["added_tokens_decoder"].items():
- if isinstance(token, dict):
- token = AddedToken(**token)
- if isinstance(token, AddedToken):
- added_tokens_decoder[int(idx)] = token
- added_tokens_map[str(token)] = token
- else:
- raise ValueError(
- f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance"
- )
- else:
- # begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified
- if special_tokens_map_file is not None:
- with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
- special_tokens_map = json.load(special_tokens_map_handle)
- for key, value in special_tokens_map.items():
- if key in kwargs and kwargs[key]:
- # This value has already been redefined by the kwargs
- # We keep this new value and ignore the one stored in the special_tokens_map_file
- continue
- if isinstance(value, dict):
- value["special"] = True
- value = AddedToken(**value)
- elif key == "additional_special_tokens" and isinstance(value, list):
- additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or []
- for token in value:
- if isinstance(token, dict):
- token["special"] = True
- token = AddedToken(**token)
- if token not in additional_special_tokens:
- additional_special_tokens.append(token)
- value = additional_special_tokens
- init_kwargs[key] = value
- # slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`.
- # this is for legacy purpose. We don't add the tokens after init for efficiency.
- if added_tokens_file is not None:
- special_tokens = []
- for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
- if init_kwargs[key] is not None:
- if key == "additional_special_tokens":
- special_tokens += [str(token) for token in init_kwargs[key]]
- else:
- special_tokens.append(str(init_kwargs[key]))
- with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
- added_tok_encoder = json.load(added_tokens_handle)
- for str_token, index in added_tok_encoder.items():
- # if index not in added_tokens_decoder and str_token not in added_tokens_map:
- special = str_token in special_tokens
- added_tokens_decoder[index] = AddedToken(
- str_token, rstrip=False, lstrip=False, normalized=not special, special=special
- )
- added_tokens_map[str_token] = added_tokens_decoder[index]  # key on the current token string, not the stale `token` variable
- # allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer
- # if `tokenizer_config.json` is `None`
- if tokenizer_file is not None:
- # This is for slow so can be done before
- with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle:
- tokenizer_file_handle = json.load(tokenizer_file_handle)
- added_tokens = tokenizer_file_handle.pop("added_tokens")
- for serialized_tokens in added_tokens:
- idx = serialized_tokens.pop("id")
- added_tokens_decoder[idx] = AddedToken(**serialized_tokens)
- added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx]
- # end legacy
- # Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken
- # convert {'__type': 'AddedToken', 'content': '<ent>', 'lstrip': False, 'normalized': True, ...} to AddedTokens
- init_kwargs["added_tokens_decoder"] = added_tokens_decoder
- init_kwargs = cls.convert_added_tokens(init_kwargs, save=False)
- for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys():
- if added_tokens_map != {} and init_kwargs[key] is not None:
- if key != "additional_special_tokens":
- init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key])
- # Instantiate the tokenizer.
- try:
- tokenizer = cls(*init_inputs, **init_kwargs)
- except import_protobuf_decode_error():
- logger.info(
- "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
- "(Google protobuf error: Tried to load SPM model with non-SPM vocab file).",
- )
- return False
- except RuntimeError as e:
- if "sentencepiece_processor.cc" in str(e):
- logger.info(
- "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
- "(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).",
- )
- return False
- except OSError:
- raise OSError(
- "Unable to load vocabulary from file. "
- "Please check that the provided vocabulary is accessible and not corrupted."
- )
- if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size:
- logger.info(
- "Special tokens have been added in the vocabulary, make sure the associated word embeddings are"
- " fine-tuned or trained."
- )
- return tokenizer
- @staticmethod
- def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
- # This method should be deleted in Transformers v5
- # Its only purpose is to potentially throw a warning
- # that incorrectly defined max lengths of T5's tokenizer are used
- # which we will correct in Transformers v5.
- return max_model_length
- @classmethod
- def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True):
- if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
- obj.pop("__type")
- return AddedToken(**obj)
- if isinstance(obj, AddedToken) and save:
- obj = obj.__getstate__()
- if add_type_field:
- obj["__type"] = "AddedToken"
- else:
- # Don't save "special" for previous tokenizers
- obj.pop("special")
- return obj
- elif isinstance(obj, (list, tuple)):
- return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj]
- elif isinstance(obj, dict):
- return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()}
- return obj
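- # Illustrative sketch (not part of the original file): `convert_added_tokens` round-trips added tokens
- # between `AddedToken` objects and plain dicts tagged with a `"__type"` field, so older configs stay loadable.
- # For example, a serialized entry such as
- #     {"__type": "AddedToken", "content": "<ent>", "single_word": False, "lstrip": False,
- #      "rstrip": False, "normalized": True, "special": True}
- # is turned back into `AddedToken("<ent>", ...)` when loading (`save=False`), while saving with
- # `save=True, add_type_field=True` produces the tagged dict again.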
- def save_pretrained(
- self,
- save_directory: Union[str, os.PathLike],
- legacy_format: Optional[bool] = None,
- filename_prefix: Optional[str] = None,
- push_to_hub: bool = False,
- **kwargs,
- ) -> Tuple[str]:
- """
- Save the full tokenizer state.
- This method makes sure the full tokenizer can then be re-loaded using the
- [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
- Warning: this won't save modifications you may have applied to the tokenizer after the instantiation (for
- instance, modifying `tokenizer.do_lower_case` after creation).
- Args:
- save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
- legacy_format (`bool`, *optional*):
- Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
- format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate
- added_tokens file.
- If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
- "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
- loaded in the corresponding "slow" tokenizer.
- If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a value
- error is raised.
- filename_prefix (`str`, *optional*):
- A prefix to add to the names of the files saved by the tokenizer.
- push_to_hub (`bool`, *optional*, defaults to `False`):
- Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
- repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
- namespace).
- kwargs (`Dict[str, Any]`, *optional*):
- Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
- Returns:
- A tuple of `str`: The files saved.
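- Example (an illustrative sketch only; the checkpoint name and the output directory below are placeholders):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> saved_files = tokenizer.save_pretrained("./my_tokenizer")
- >>> reloaded = AutoTokenizer.from_pretrained("./my_tokenizer")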
- """
- use_auth_token = kwargs.pop("use_auth_token", None)
- if use_auth_token is not None:
- warnings.warn(
- "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
- FutureWarning,
- )
- if kwargs.get("token", None) is not None:
- raise ValueError(
- "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
- )
- kwargs["token"] = use_auth_token
- if os.path.isfile(save_directory):
- logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
- return
- os.makedirs(save_directory, exist_ok=True)
- if push_to_hub:
- commit_message = kwargs.pop("commit_message", None)
- repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
- repo_id = self._create_repo(repo_id, **kwargs)
- files_timestamps = self._get_files_timestamps(save_directory)
- special_tokens_map_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
- )
- tokenizer_config_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
- )
- tokenizer_config = copy.deepcopy(self.init_kwargs)
- # Let's save the init kwargs
- target_keys = set(self.init_kwargs.keys())
- # Let's save the special tokens map (only the strings)
- target_keys.update(["model_max_length", "clean_up_tokenization_spaces"])
- for k in target_keys:
- if hasattr(self, k):
- tokenizer_config[k] = getattr(self, k)
- # Let's make sure we properly save the special tokens.
- tokenizer_config.update(self.special_tokens_map)
- if self.chat_template is not None:
- if isinstance(self.chat_template, dict):
- # Chat template dicts are saved to the config as lists of dicts with fixed key names.
- # They will be reconstructed as a single dict during loading.
- tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()]
- else:
- tokenizer_config["chat_template"] = self.chat_template
- if len(self.init_inputs) > 0:
- tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
- for file_id in self.vocab_files_names.keys():
- tokenizer_config.pop(file_id, None)
- # no typefields, this way old fast and slow can load it
- tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True)
- # Process added tokens separately: allows previous versions to ignore it!
- added_tokens = {}
- for key, value in self.added_tokens_decoder.items():
- added_tokens[key] = value.__getstate__()
- tokenizer_config["added_tokens_decoder"] = added_tokens
- # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
- tokenizer_class = self.__class__.__name__
- # Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
- if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
- tokenizer_class = tokenizer_class[:-4]
- tokenizer_config["tokenizer_class"] = tokenizer_class
- if getattr(self, "_auto_map", None) is not None:
- tokenizer_config["auto_map"] = self._auto_map
- if getattr(self, "_processor_class", None) is not None:
- tokenizer_config["processor_class"] = self._processor_class
- # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
- # loaded from the Hub.
- if self._auto_class is not None:
- custom_object_save(self, save_directory, config=tokenizer_config)
- # remove private information
- if "name_or_path" in tokenizer_config:
- tokenizer_config.pop("name_or_path")
- tokenizer_config.pop("special_tokens_map_file", None)
- tokenizer_config.pop("tokenizer_file", None)
- if "device_map" in tokenizer_config:
- tokenizer_config.pop("device_map")
- with open(tokenizer_config_file, "w", encoding="utf-8") as f:
- out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
- f.write(out_str)
- logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
- # Sanitize AddedTokens in special_tokens_map
- # kept for forward compatibility, will be removed in transformers v5. Type fields are not saved for FC, and `special` should not be saved either
- write_dict = self.convert_added_tokens(self.special_tokens_map_extended, save=True, add_type_field=False)
- with open(special_tokens_map_file, "w", encoding="utf-8") as f:
- out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
- f.write(out_str)
- logger.info(f"Special tokens file saved in {special_tokens_map_file}")
- file_names = (tokenizer_config_file, special_tokens_map_file)
- save_files = self._save_pretrained(
- save_directory=save_directory,
- file_names=file_names,
- legacy_format=legacy_format,
- filename_prefix=filename_prefix,
- )
- if push_to_hub:
- self._upload_modified_files(
- save_directory,
- repo_id,
- files_timestamps,
- commit_message=commit_message,
- token=kwargs.get("token"),
- )
- return save_files
- def _save_pretrained(
- self,
- save_directory: Union[str, os.PathLike],
- file_names: Tuple[str],
- legacy_format: Optional[bool] = None,
- filename_prefix: Optional[str] = None,
- ) -> Tuple[str]:
- """
- Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
- Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the
- specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`]
- """
- if legacy_format is False:
- raise ValueError(
- "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
- )
- save_directory = str(save_directory)
- added_tokens_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
- )
- # the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size
- added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size}
- if added_vocab:
- with open(added_tokens_file, "w", encoding="utf-8") as f:
- out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
- f.write(out_str)
- logger.info(f"added tokens file saved in {added_tokens_file}")
- vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
- return file_names + vocab_files + (added_tokens_file,)
- def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
- """
- Save only the vocabulary of the tokenizer (vocabulary + added tokens).
- This method won't save the configuration and special token mappings of the tokenizer. Use
- [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.
- Args:
- save_directory (`str`):
- The directory in which to save the vocabulary.
- filename_prefix (`str`, *optional*):
- An optional prefix to add to the names of the saved files.
- Returns:
- `Tuple(str)`: Paths to the files saved.
- """
- raise NotImplementedError
- def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
- """
- Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.
- Args:
- text (`str`):
- The sequence to be encoded.
- pair (`str`, *optional*):
- A second sequence to be encoded with the first.
- add_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not to add the special tokens associated with the corresponding model.
- kwargs (additional keyword arguments, *optional*):
- Will be passed to the underlying model specific encode method. See details in
- [`~PreTrainedTokenizerBase.__call__`]
- Returns:
- `List[str]`: The list of tokens.
- """
- raise NotImplementedError
- @add_end_docstrings(
- ENCODE_KWARGS_DOCSTRING,
- """
- **kwargs: Passed along to the `.tokenize()` method.
- """,
- """
- Returns:
- `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
- """,
- )
- def encode(
- self,
- text: Union[TextInput, PreTokenizedInput, EncodedInput],
- text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- **kwargs,
- ) -> List[int]:
- """
- Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.
- Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
- Args:
- text (`str`, `List[str]` or `List[int]`):
- The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
- `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
- method).
- text_pair (`str`, `List[str]` or `List[int]`, *optional*):
- Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
- the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
- method).
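- Example (illustrative; the checkpoint is a placeholder and the exact ids depend on its vocabulary):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> ids = tokenizer.encode("Hello world", add_special_tokens=True)
- >>> tokenizer.decode(ids)  # round-trips, with the model's special tokens (e.g. [CLS]/[SEP]) included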
- """
- encoded_inputs = self.encode_plus(
- text,
- text_pair=text_pair,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- padding_side=padding_side,
- return_tensors=return_tensors,
- **kwargs,
- )
- return encoded_inputs["input_ids"]
- def num_special_tokens_to_add(self, pair: bool = False) -> int:
- raise NotImplementedError
- def _get_padding_truncation_strategies(
- self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
- ):
- """
- Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
- and pad_to_max_length) and behaviors.
- """
- old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
- old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
- # Backward compatibility for previous behavior, maybe we should deprecate it:
- # If you only set max_length, it activates truncation for max_length
- if max_length is not None and padding is False and truncation is None:
- if verbose:
- if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False):
- logger.warning(
- "Truncation was not explicitly activated but `max_length` is provided a specific value, please"
- " use `truncation=True` to explicitly truncate examples to max length. Defaulting to"
- " 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the"
- " tokenizer you can select this strategy more precisely by providing a specific strategy to"
- " `truncation`."
- )
- self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
- truncation = "longest_first"
- # Get padding strategy
- if padding is False and old_pad_to_max_length:
- if verbose:
- warnings.warn(
- "The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
- "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
- "use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
- "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
- "maximal input size of the model (e.g. 512 for Bert).",
- FutureWarning,
- )
- if max_length is None:
- padding_strategy = PaddingStrategy.LONGEST
- else:
- padding_strategy = PaddingStrategy.MAX_LENGTH
- elif padding is not False:
- if padding is True:
- if verbose:
- if max_length is not None and (
- truncation is None or truncation is False or truncation == "do_not_truncate"
- ):
- warnings.warn(
- "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
- "To pad to max length, use `padding='max_length'`."
- )
- if old_pad_to_max_length is not False:
- warnings.warn("Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.")
- padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
- elif not isinstance(padding, PaddingStrategy):
- padding_strategy = PaddingStrategy(padding)
- elif isinstance(padding, PaddingStrategy):
- padding_strategy = padding
- else:
- padding_strategy = PaddingStrategy.DO_NOT_PAD
- # Get truncation strategy
- if truncation is None and old_truncation_strategy != "do_not_truncate":
- if verbose:
- warnings.warn(
- "The `truncation_strategy` argument is deprecated and will be removed in a future version, use"
- " `truncation=True` to truncate examples to a max length. You can give a specific length with"
- " `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input"
- " size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific"
- " truncation strategy selected among `truncation='only_first'` (will only truncate the first"
- " sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the"
- " pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence"
- " in the pairs).",
- FutureWarning,
- )
- truncation_strategy = TruncationStrategy(old_truncation_strategy)
- elif truncation is not False and truncation is not None:
- if truncation is True:
- truncation_strategy = (
- TruncationStrategy.LONGEST_FIRST
- ) # Default to truncate the longest sequences in pairs of inputs
- elif not isinstance(truncation, TruncationStrategy):
- truncation_strategy = TruncationStrategy(truncation)
- elif isinstance(truncation, TruncationStrategy):
- truncation_strategy = truncation
- else:
- truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
- # Set max length if needed
- if max_length is None:
- if padding_strategy == PaddingStrategy.MAX_LENGTH:
- if self.model_max_length > LARGE_INTEGER:
- if verbose:
- if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False):
- logger.warning(
- "Asking to pad to max_length but no maximum length is provided and the model has no"
- " predefined maximum length. Default to no padding."
- )
- self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
- padding_strategy = PaddingStrategy.DO_NOT_PAD
- else:
- max_length = self.model_max_length
- if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
- if self.model_max_length > LARGE_INTEGER:
- if verbose:
- if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False):
- logger.warning(
- "Asking to truncate to max_length but no maximum length is provided and the model has"
- " no predefined maximum length. Default to no truncation."
- )
- self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True
- truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
- else:
- max_length = self.model_max_length
- # Test if we have a padding token
- if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0):
- raise ValueError(
- "Asking to pad but the tokenizer does not have a padding token. "
- "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
- "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
- )
- # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
- if (
- truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
- and padding_strategy != PaddingStrategy.DO_NOT_PAD
- and pad_to_multiple_of is not None
- and max_length is not None
- and (max_length % pad_to_multiple_of != 0)
- ):
- raise ValueError(
- "Truncation and padding are both activated but "
- f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
- )
- return padding_strategy, truncation_strategy, max_length, kwargs
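- # Illustrative summary (not part of the original file) of how `_get_padding_truncation_strategies`
- # resolves the user-facing arguments above:
- #   padding=True                          -> PaddingStrategy.LONGEST
- #   padding="max_length", max_length=128  -> PaddingStrategy.MAX_LENGTH (pad target of 128)
- #   truncation=True                       -> TruncationStrategy.LONGEST_FIRST
- #   max_length=128 alone                  -> truncation defaults to "longest_first" (with a warning)
- #   padding="max_length", max_length=None -> falls back to `self.model_max_length`, or no padding if the
- #                                            model defines no maximum length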
- @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def __call__(
- self,
- text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
- text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
- text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
- text_pair_target: Optional[
- Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
- ] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- is_split_into_words: bool = False,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
- Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
- sequences.
- Args:
- text (`str`, `List[str]`, `List[List[str]]`, *optional*):
- The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
- (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
- `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
- text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
- The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
- (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
- `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
- text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
- The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
- list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
- you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
- text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
- The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
- list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
- you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
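- Example (illustrative sketch; the checkpoint is a placeholder, and the returned keys depend on the model):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> batch = tokenizer(["A first sentence.", "And a second one."], padding=True, truncation=True, return_tensors="pt")
- >>> sorted(batch.keys())  # for a BERT-like tokenizer
- ['attention_mask', 'input_ids', 'token_type_ids']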
- """
- # To avoid duplicating
- all_kwargs = {
- "add_special_tokens": add_special_tokens,
- "padding": padding,
- "truncation": truncation,
- "max_length": max_length,
- "stride": stride,
- "is_split_into_words": is_split_into_words,
- "pad_to_multiple_of": pad_to_multiple_of,
- "padding_side": padding_side,
- "return_tensors": return_tensors,
- "return_token_type_ids": return_token_type_ids,
- "return_attention_mask": return_attention_mask,
- "return_overflowing_tokens": return_overflowing_tokens,
- "return_special_tokens_mask": return_special_tokens_mask,
- "return_offsets_mapping": return_offsets_mapping,
- "return_length": return_length,
- "split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens),
- "verbose": verbose,
- }
- all_kwargs.update(kwargs)
- if text is None and text_target is None:
- raise ValueError("You need to specify either `text` or `text_target`.")
- if text is not None:
- # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the
- # input mode in this case.
- if not self._in_target_context_manager:
- self._switch_to_input_mode()
- encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs)
- if text_target is not None:
- self._switch_to_target_mode()
- target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs)
- # Leave back tokenizer in input mode
- self._switch_to_input_mode()
- if text_target is None:
- return encodings
- elif text is None:
- return target_encodings
- else:
- encodings["labels"] = target_encodings["input_ids"]
- return encodings
- def _call_one(
- self,
- text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
- text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- is_split_into_words: bool = False,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- split_special_tokens: bool = False,
- **kwargs,
- ) -> BatchEncoding:
- # Input type checking for clearer error
- def _is_valid_text_input(t):
- if isinstance(t, str):
- # Strings are fine
- return True
- elif isinstance(t, (list, tuple)):
- # List are fine as long as they are...
- if len(t) == 0:
- # ... empty
- return True
- elif isinstance(t[0], str):
- # ... list of strings
- return True
- elif isinstance(t[0], (list, tuple)):
- # ... list with an empty list or with a list of strings
- return len(t[0]) == 0 or isinstance(t[0][0], str)
- else:
- return False
- else:
- return False
- if not _is_valid_text_input(text):
- raise ValueError(
- "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) "
- "or `List[List[str]]` (batch of pretokenized examples)."
- )
- if text_pair is not None and not _is_valid_text_input(text_pair):
- raise ValueError(
- "text input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) "
- "or `List[List[str]]` (batch of pretokenized examples)."
- )
- if is_split_into_words:
- is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
- else:
- is_batched = isinstance(text, (list, tuple))
- if is_batched:
- if isinstance(text_pair, str):
- raise TypeError(
- "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as"
- " `text`."
- )
- if text_pair is not None and len(text) != len(text_pair):
- raise ValueError(
- f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
- f" {len(text_pair)}."
- )
- batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
- return self.batch_encode_plus(
- batch_text_or_text_pairs=batch_text_or_text_pairs,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- is_split_into_words=is_split_into_words,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- split_special_tokens=split_special_tokens,
- **kwargs,
- )
- else:
- return self.encode_plus(
- text=text,
- text_pair=text_pair,
- add_special_tokens=add_special_tokens,
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- stride=stride,
- is_split_into_words=is_split_into_words,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- split_special_tokens=split_special_tokens,
- **kwargs,
- )
- @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def encode_plus(
- self,
- text: Union[TextInput, PreTokenizedInput, EncodedInput],
- text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- is_split_into_words: bool = False,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
- Tokenize and prepare for the model a sequence or a pair of sequences.
- <Tip warning={true}>
- This method is deprecated, `__call__` should be used instead.
- </Tip>
- Args:
- text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`):
- The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
- `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
- method).
- text_pair (`str`, `List[str]` or `List[int]`, *optional*):
- Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
- the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
- method).
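- Example (illustrative only; prefer calling the tokenizer directly, since this method is deprecated; the checkpoint is a placeholder):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> encoded = tokenizer.encode_plus("How many cats?", text_pair="There are two cats.", add_special_tokens=True)
- >>> "input_ids" in encoded
- True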
- """
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
- return self._encode_plus(
- text=text,
- text_pair=text_pair,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- is_split_into_words=is_split_into_words,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- split_special_tokens=kwargs.pop("split_special_tokens", self.split_special_tokens),
- **kwargs,
- )
- def _encode_plus(
- self,
- text: Union[TextInput, PreTokenizedInput, EncodedInput],
- text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
- add_special_tokens: bool = True,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
- max_length: Optional[int] = None,
- stride: int = 0,
- is_split_into_words: bool = False,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- split_special_tokens: bool = False,
- **kwargs,
- ) -> BatchEncoding:
- raise NotImplementedError
- @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def batch_encode_plus(
- self,
- batch_text_or_text_pairs: Union[
- List[TextInput],
- List[TextInputPair],
- List[PreTokenizedInput],
- List[PreTokenizedInputPair],
- List[EncodedInput],
- List[EncodedInputPair],
- ],
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- is_split_into_words: bool = False,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- split_special_tokens: bool = False,
- **kwargs,
- ) -> BatchEncoding:
- """
- Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
- <Tip warning={true}>
- This method is deprecated, `__call__` should be used instead.
- </Tip>
- Args:
- batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
- Batch of sequences or pair of sequences to be encoded. This can be a list of
- string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
- details in `encode_plus`).
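- Example (illustrative only; prefer `__call__`, which accepts the same batched inputs; the checkpoint is a placeholder):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> pairs = [("How many cats?", "There are two cats."), ("Where is it?", "On the mat.")]
- >>> batch = tokenizer.batch_encode_plus(pairs, padding=True, truncation=True)
- >>> len(batch["input_ids"])
- 2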
- """
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
- return self._batch_encode_plus(
- batch_text_or_text_pairs=batch_text_or_text_pairs,
- add_special_tokens=add_special_tokens,
- padding_strategy=padding_strategy,
- truncation_strategy=truncation_strategy,
- max_length=max_length,
- stride=stride,
- is_split_into_words=is_split_into_words,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_tensors=return_tensors,
- return_token_type_ids=return_token_type_ids,
- return_attention_mask=return_attention_mask,
- return_overflowing_tokens=return_overflowing_tokens,
- return_special_tokens_mask=return_special_tokens_mask,
- return_offsets_mapping=return_offsets_mapping,
- return_length=return_length,
- verbose=verbose,
- split_special_tokens=split_special_tokens,
- **kwargs,
- )
- def _batch_encode_plus(
- self,
- batch_text_or_text_pairs: Union[
- List[TextInput],
- List[TextInputPair],
- List[PreTokenizedInput],
- List[PreTokenizedInputPair],
- List[EncodedInput],
- List[EncodedInputPair],
- ],
- add_special_tokens: bool = True,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
- max_length: Optional[int] = None,
- stride: int = 0,
- is_split_into_words: bool = False,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- split_special_tokens: bool = False,
- **kwargs,
- ) -> BatchEncoding:
- raise NotImplementedError
- def pad(
- self,
- encoded_inputs: Union[
- BatchEncoding,
- List[BatchEncoding],
- Dict[str, EncodedInput],
- Dict[str, List[EncodedInput]],
- List[Dict[str, EncodedInput]],
- ],
- padding: Union[bool, str, PaddingStrategy] = True,
- max_length: Optional[int] = None,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_attention_mask: Optional[bool] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- verbose: bool = True,
- ) -> BatchEncoding:
- """
- Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
- in the batch.
- The padding side (left/right) and the padding token ids are defined at the tokenizer level (with `self.padding_side`,
- `self.pad_token_id` and `self.pad_token_type_id`).
- Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the
- text followed by a call to the `pad` method to get a padded encoding.
- <Tip>
- If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
- result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
- PyTorch tensors, you will lose the specific device of your tensors however.
- </Tip>
- Args:
- encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
- Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
- tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
- List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
- collate function.
- Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
- the note above for the return type.
- padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
- Select a strategy to pad the returned sequences (according to the model's padding side and padding
- index) among:
- - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
- sequence is provided).
- - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
- acceptable input length for the model if that argument is not provided.
- - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
- lengths).
- max_length (`int`, *optional*):
- Maximum length of the returned list and optionally padding length (see above).
- pad_to_multiple_of (`int`, *optional*):
- If set will pad the sequence to a multiple of the provided value.
- This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
- `>= 7.5` (Volta).
- padding_side (`str`, *optional*):
- The side on which the model should have padding applied. Should be selected between ['right', 'left'].
- Default value is picked from the class attribute of the same name.
- return_attention_mask (`bool`, *optional*):
- Whether to return the attention mask. If left to the default, will return the attention mask according
- to the specific tokenizer's default, defined by the `return_outputs` attribute.
- [What are attention masks?](../glossary#attention-mask)
- return_tensors (`str` or [`~utils.TensorType`], *optional*):
- If set, will return tensors instead of list of python integers. Acceptable values are:
- - `'tf'`: Return TensorFlow `tf.constant` objects.
- - `'pt'`: Return PyTorch `torch.Tensor` objects.
- - `'np'`: Return Numpy `np.ndarray` objects.
- verbose (`bool`, *optional*, defaults to `True`):
- Whether or not to print more information and warnings.
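- Example (illustrative sketch of using `pad` as a collate function; the ids below are toy values and the
- checkpoint is a placeholder):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> features = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]
- >>> padded = tokenizer.pad(features, padding=True, return_tensors="pt")
- >>> padded["input_ids"].shape  # both rows padded to the longest sequence in the batch
- torch.Size([2, 3])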
- """
- if self.__class__.__name__.endswith("Fast"):
- if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False):
- logger.warning_advice(
- f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer,"
- " using the `__call__` method is faster than using a method to encode the text followed by a call"
- " to the `pad` method to get a padded encoding."
- )
- self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
- # If we have a list of dicts, let's convert it in a dict of lists
- # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
- if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
- encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
- # The model's main input name, usually `input_ids`, has been passed for padding
- if self.model_input_names[0] not in encoded_inputs:
- raise ValueError(
- "You should supply an encoding or a list of encodings to this method "
- f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
- )
- required_input = encoded_inputs[self.model_input_names[0]]
- if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0):
- if return_attention_mask:
- encoded_inputs["attention_mask"] = []
- return encoded_inputs
- # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
- # and rebuild them afterwards if no return_tensors is specified
- # Note that we lose the specific device the tensor may be on for PyTorch
- first_element = required_input[0]
- if isinstance(first_element, (list, tuple)):
- # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
- for item in required_input:
- if len(item) != 0:
- first_element = item[0]
- break
- # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
- if not isinstance(first_element, (int, list, tuple)):
- if is_tf_tensor(first_element):
- return_tensors = "tf" if return_tensors is None else return_tensors
- elif is_torch_tensor(first_element):
- return_tensors = "pt" if return_tensors is None else return_tensors
- elif isinstance(first_element, np.ndarray):
- return_tensors = "np" if return_tensors is None else return_tensors
- else:
- raise ValueError(
- f"type of {first_element} unknown: {type(first_element)}. "
- "Should be one of a python, numpy, pytorch or tensorflow object."
- )
- for key, value in encoded_inputs.items():
- encoded_inputs[key] = to_py_obj(value)
- # Convert padding_strategy in PaddingStrategy
- padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
- padding=padding, max_length=max_length, verbose=verbose
- )
- required_input = encoded_inputs[self.model_input_names[0]]
- if required_input and not isinstance(required_input[0], (list, tuple)):
- encoded_inputs = self._pad(
- encoded_inputs,
- max_length=max_length,
- padding_strategy=padding_strategy,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_attention_mask=return_attention_mask,
- )
- return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
- batch_size = len(required_input)
- assert all(
- len(v) == batch_size for v in encoded_inputs.values()
- ), "Some items in the output dictionary have a different batch size than others."
- if padding_strategy == PaddingStrategy.LONGEST:
- max_length = max(len(inputs) for inputs in required_input)
- padding_strategy = PaddingStrategy.MAX_LENGTH
- batch_outputs = {}
- for i in range(batch_size):
- inputs = {k: v[i] for k, v in encoded_inputs.items()}
- outputs = self._pad(
- inputs,
- max_length=max_length,
- padding_strategy=padding_strategy,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_attention_mask=return_attention_mask,
- )
- for key, value in outputs.items():
- if key not in batch_outputs:
- batch_outputs[key] = []
- batch_outputs[key].append(value)
- return BatchEncoding(batch_outputs, tensor_type=return_tensors)
- def create_token_type_ids_from_sequences(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Create the token type IDs corresponding to the sequences passed. [What are token type
- IDs?](../glossary#token-type-ids)
- Should be overridden in a subclass if the model has a special way of building those.
- Args:
- token_ids_0 (`List[int]`): The first tokenized sequence.
- token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
- Returns:
- `List[int]`: The token type ids.
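- For instance, with `token_ids_0 = [1, 2, 3]` and `token_ids_1 = [4, 5]`, this base implementation returns
- `[0, 0, 0, 1, 1]`; model-specific subclasses (e.g. BERT's) also account for the special tokens they insert.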
- """
- if token_ids_1 is None:
- return len(token_ids_0) * [0]
- return [0] * len(token_ids_0) + [1] * len(token_ids_1)
- def build_inputs_with_special_tokens(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- """
- Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
- adding special tokens.
- This implementation does not add special tokens and this method should be overridden in a subclass.
- Args:
- token_ids_0 (`List[int]`): The first tokenized sequence.
- token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.
- Returns:
- `List[int]`: The model input with special tokens.
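- For instance, with `token_ids_0 = [5, 6]` and `token_ids_1 = [7, 8]`, this base implementation simply returns
- `[5, 6, 7, 8]`; a model-specific subclass such as BERT's would instead wrap them with its `[CLS]`/`[SEP]` ids.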
- """
- if token_ids_1 is None:
- return token_ids_0
- return token_ids_0 + token_ids_1
- @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
- def prepare_for_model(
- self,
- ids: List[int],
- pair_ids: Optional[List[int]] = None,
- add_special_tokens: bool = True,
- padding: Union[bool, str, PaddingStrategy] = False,
- truncation: Union[bool, str, TruncationStrategy] = None,
- max_length: Optional[int] = None,
- stride: int = 0,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_tensors: Optional[Union[str, TensorType]] = None,
- return_token_type_ids: Optional[bool] = None,
- return_attention_mask: Optional[bool] = None,
- return_overflowing_tokens: bool = False,
- return_special_tokens_mask: bool = False,
- return_offsets_mapping: bool = False,
- return_length: bool = False,
- verbose: bool = True,
- prepend_batch_axis: bool = False,
- **kwargs,
- ) -> BatchEncoding:
- """
- Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
- adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
- manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids*
- different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
- overflowing tokens. Such a combination of arguments will raise an error.
- Args:
- ids (`List[int]`):
- Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
- `convert_tokens_to_ids` methods.
- pair_ids (`List[int]`, *optional*):
- Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
- and `convert_tokens_to_ids` methods.
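- Example (illustrative; the checkpoint is a placeholder and the resulting ids depend on its vocabulary):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
- >>> encoded = tokenizer.prepare_for_model(ids, add_special_tokens=True)
- >>> "input_ids" in encoded
- True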
- """
- # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
- padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
- padding=padding,
- truncation=truncation,
- max_length=max_length,
- pad_to_multiple_of=pad_to_multiple_of,
- verbose=verbose,
- **kwargs,
- )
- pair = bool(pair_ids is not None)
- len_ids = len(ids)
- len_pair_ids = len(pair_ids) if pair else 0
- if return_token_type_ids and not add_special_tokens:
- raise ValueError(
- "Asking to return token_type_ids while setting add_special_tokens to False "
- "results in an undefined behavior. Please set add_special_tokens to True or "
- "set return_token_type_ids to None."
- )
- if (
- return_overflowing_tokens
- and truncation_strategy == TruncationStrategy.LONGEST_FIRST
- and pair_ids is not None
- ):
- raise ValueError(
- "Not possible to return overflowing tokens for pair of sequences with the "
- "`longest_first`. Please select another truncation strategy than `longest_first`, "
- "for instance `only_second` or `only_first`."
- )
- # Load from model defaults
- if return_token_type_ids is None:
- return_token_type_ids = "token_type_ids" in self.model_input_names
- if return_attention_mask is None:
- return_attention_mask = "attention_mask" in self.model_input_names
- encoded_inputs = {}
- # Compute the total size of the returned encodings
- total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
- # Truncation: Handle max sequence length
- overflowing_tokens = []
- if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
- ids, pair_ids, overflowing_tokens = self.truncate_sequences(
- ids,
- pair_ids=pair_ids,
- num_tokens_to_remove=total_len - max_length,
- truncation_strategy=truncation_strategy,
- stride=stride,
- )
- if return_overflowing_tokens:
- encoded_inputs["overflowing_tokens"] = overflowing_tokens
- encoded_inputs["num_truncated_tokens"] = total_len - max_length
- # Add special tokens
- if add_special_tokens:
- sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
- token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
- else:
- sequence = ids + pair_ids if pair else ids
- token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
- # Build output dictionary
- encoded_inputs["input_ids"] = sequence
- if return_token_type_ids:
- encoded_inputs["token_type_ids"] = token_type_ids
- if return_special_tokens_mask:
- if add_special_tokens:
- encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
- else:
- encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
- # Check lengths
- self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
- # Padding
- if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
- encoded_inputs = self.pad(
- encoded_inputs,
- max_length=max_length,
- padding=padding_strategy.value,
- pad_to_multiple_of=pad_to_multiple_of,
- padding_side=padding_side,
- return_attention_mask=return_attention_mask,
- )
- if return_length:
- encoded_inputs["length"] = len(encoded_inputs["input_ids"])
- batch_outputs = BatchEncoding(
- encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
- )
- return batch_outputs
- def truncate_sequences(
- self,
- ids: List[int],
- pair_ids: Optional[List[int]] = None,
- num_tokens_to_remove: int = 0,
- truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
- stride: int = 0,
- ) -> Tuple[List[int], List[int], List[int]]:
- """
- Truncates a sequence pair in-place following the strategy.
- Args:
- ids (`List[int]`):
- Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
- `convert_tokens_to_ids` methods.
- pair_ids (`List[int]`, *optional*):
- Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
- and `convert_tokens_to_ids` methods.
- num_tokens_to_remove (`int`, *optional*, defaults to 0):
- Number of tokens to remove using the truncation strategy.
- truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
- The strategy to follow for truncation. Can be:
- - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will truncate
- token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
- batch of pairs) is provided.
- - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will only
- truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will only
- truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
- than the model maximum admissible input size).
- stride (`int`, *optional*, defaults to 0):
- If set to a positive number, the overflowing tokens returned will contain some tokens from the main
- sequence returned. The value of this argument defines the number of additional tokens.
- Returns:
- `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
- overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair
- of sequences (or a batch of pairs) is provided.
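- Example (illustrative, with toy ids and the default `truncation_side="right"`; the checkpoint is a placeholder):
- >>> from transformers import AutoTokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
- >>> ids, pair_ids, overflow = tokenizer.truncate_sequences(list(range(10)), num_tokens_to_remove=4, truncation_strategy="only_first", stride=2)
- >>> len(ids), len(overflow)  # 4 ids removed from the right; overflow keeps them plus a 2-token stride
- (6, 6)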
- """
- if num_tokens_to_remove <= 0:
- return ids, pair_ids, []
- if not isinstance(truncation_strategy, TruncationStrategy):
- truncation_strategy = TruncationStrategy(truncation_strategy)
- overflowing_tokens = []
- if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
- truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
- ):
- if len(ids) > num_tokens_to_remove:
- window_len = min(len(ids), stride + num_tokens_to_remove)
- if self.truncation_side == "left":
- overflowing_tokens = ids[:window_len]
- ids = ids[num_tokens_to_remove:]
- elif self.truncation_side == "right":
- overflowing_tokens = ids[-window_len:]
- ids = ids[:-num_tokens_to_remove]
- else:
- raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")
- else:
- error_msg = (
- f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
- f"but the first sequence has a length {len(ids)}. "
- )
- if truncation_strategy == TruncationStrategy.ONLY_FIRST:
- error_msg = (
- error_msg + "Please select another truncation strategy than "
- f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
- )
- logger.error(error_msg)
- elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
- logger.warning(
- "Be aware, overflowing tokens are not returned for the setting you have chosen,"
- f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
- "truncation strategy. So the returned list will always be empty even if some "
- "tokens have been removed."
- )
- len_pair_ids = len(pair_ids) if pair_ids is not None else 0
- len_ids = len(ids)
- first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove)
- second_remove = num_tokens_to_remove - first_remove
- if len_ids > len_pair_ids:
- ids_to_move = first_remove + second_remove // 2
- pair_ids_to_move = second_remove - second_remove // 2
- else:
- ids_to_move = second_remove // 2
- pair_ids_to_move = first_remove + second_remove - (second_remove // 2)
- if self.truncation_side == "right":
- ids = ids[:-ids_to_move] if ids_to_move > 0 else ids
- pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids
- elif self.truncation_side == "left":
- ids = ids[ids_to_move:]
- pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None
- else:
- raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")
- elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
- if len(pair_ids) > num_tokens_to_remove:
- window_len = min(len(pair_ids), stride + num_tokens_to_remove)
- if self.truncation_side == "right":
- overflowing_tokens = pair_ids[-window_len:]
- pair_ids = pair_ids[:-num_tokens_to_remove]
- elif self.truncation_side == "left":
- overflowing_tokens = pair_ids[:window_len]
- pair_ids = pair_ids[num_tokens_to_remove:]
- else:
- raise ValueError(f"invalid truncation side: {self.truncation_side}, use 'left' or 'right'.")
- else:
- logger.error(
- f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
- f"but the second sequence has a length {len(pair_ids)}. "
- f"Please select another truncation strategy than {truncation_strategy}, "
- "for instance 'longest_first' or 'only_first'."
- )
- return (ids, pair_ids, overflowing_tokens)
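- # A minimal usage sketch for `truncate_sequences` (the checkpoint name and sentence are illustrative
- # assumptions, not taken from this file):
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
- #     ids = tok.convert_tokens_to_ids(tok.tokenize("a fairly long example sentence"))
- #     ids, pair_ids, overflow = tok.truncate_sequences(ids, num_tokens_to_remove=2, stride=1)
- #     # with the default right-side truncation, `ids` loses its last 2 tokens and `overflow` holds
- #     # those 2 tokens plus 1 extra token of context from the kept part (the `stride`)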
- def _pad(
- self,
- encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
- max_length: Optional[int] = None,
- padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
- pad_to_multiple_of: Optional[int] = None,
- padding_side: Optional[str] = None,
- return_attention_mask: Optional[bool] = None,
- ) -> dict:
- """
- Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
- Args:
- encoded_inputs:
- Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
- max_length: maximum length of the returned list and optionally padding length (see below).
- Will truncate by taking into account the special tokens.
- padding_strategy: PaddingStrategy to use for padding.
- - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- - PaddingStrategy.MAX_LENGTH: Pad to the max length
- - PaddingStrategy.DO_NOT_PAD (default): Do not pad
- The tokenizer padding sides are defined in `padding_side` argument:
- - 'left': pads on the left of the sequences
- - 'right': pads on the right of the sequences
- pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
- This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
- `>= 7.5` (Volta).
- padding_side:
- The side on which the model should have padding applied. Should be either 'right' or 'left'.
- Default value is picked from the class attribute of the same name.
- return_attention_mask:
- (optional) Set to False to avoid returning attention mask (default: set to model specifics)
- """
- # Load from model defaults
- if return_attention_mask is None:
- return_attention_mask = "attention_mask" in self.model_input_names
- required_input = encoded_inputs[self.model_input_names[0]]
- if padding_strategy == PaddingStrategy.LONGEST:
- max_length = len(required_input)
- if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
- max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
- needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
- # Initialize attention mask if not present.
- if return_attention_mask and "attention_mask" not in encoded_inputs:
- encoded_inputs["attention_mask"] = [1] * len(required_input)
- if needs_to_be_padded:
- difference = max_length - len(required_input)
- padding_side = padding_side if padding_side is not None else self.padding_side
- if padding_side == "right":
- if return_attention_mask:
- encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
- if "token_type_ids" in encoded_inputs:
- encoded_inputs["token_type_ids"] = (
- encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
- )
- if "special_tokens_mask" in encoded_inputs:
- encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
- encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
- elif padding_side == "left":
- if return_attention_mask:
- encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
- if "token_type_ids" in encoded_inputs:
- encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
- "token_type_ids"
- ]
- if "special_tokens_mask" in encoded_inputs:
- encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
- encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
- else:
- raise ValueError(f"Invalid padding side: {padding_side}, should be 'left' or 'right'.")
- return encoded_inputs
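- # `_pad` handles one example at a time; the public `pad` method (called earlier in this file) dispatches to
- # it per example and also accepts batches. A small sketch (the checkpoint name is an illustrative assumption):
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
- #     batch = tok.pad({"input_ids": [[101, 7592, 102], [101, 7592, 2088, 102]]}, padding="longest")
- #     # batch["input_ids"][0] is right-padded with tok.pad_token_id to length 4 and
- #     # batch["attention_mask"][0] == [1, 1, 1, 0]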
- def convert_tokens_to_string(self, tokens: List[str]) -> str:
- """
- Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)`, but we
- often want to remove sub-word tokenization artifacts at the same time.
- Args:
- tokens (`List[str]`): The tokens to join into a string.
- Returns:
- `str`: The joined tokens.
- """
- raise NotImplementedError
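- # Subclasses override `convert_tokens_to_string`. A sketch of what a WordPiece-style implementation might
- # look like (the "##" continuation prefix is an assumption about the sub-word marker, not part of this base class):
- #
- #     def convert_tokens_to_string(self, tokens):
- #         return " ".join(tokens).replace(" ##", "").strip()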
- def batch_decode(
- self,
- sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
- skip_special_tokens: bool = False,
- clean_up_tokenization_spaces: Optional[bool] = None,
- **kwargs,
- ) -> List[str]:
- """
- Convert a list of lists of token ids into a list of strings by calling decode.
- Args:
- sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
- List of tokenized input ids. Can be obtained using the `__call__` method.
- skip_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not to remove special tokens in the decoding.
- clean_up_tokenization_spaces (`bool`, *optional*):
- Whether or not to clean up the tokenization spaces. If `None`, will default to
- `self.clean_up_tokenization_spaces`.
- kwargs (additional keyword arguments, *optional*):
- Will be passed to the underlying model specific decode method.
- Returns:
- `List[str]`: The list of decoded sentences.
- """
- return [
- self.decode(
- seq,
- skip_special_tokens=skip_special_tokens,
- clean_up_tokenization_spaces=clean_up_tokenization_spaces,
- **kwargs,
- )
- for seq in sequences
- ]
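- # A minimal usage sketch for `batch_decode` (checkpoint and ids are illustrative assumptions):
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
- #     tok.batch_decode([[101, 7592, 102], [101, 2088, 102]], skip_special_tokens=True)
- #     # -> ["hello", "world"] for a BERT-style vocabulary where 101/102 are [CLS]/[SEP]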
- def decode(
- self,
- token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
- skip_special_tokens: bool = False,
- clean_up_tokenization_spaces: Optional[bool] = None,
- **kwargs,
- ) -> str:
- """
- Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
- tokens and clean up tokenization spaces.
- Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
- Args:
- token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
- List of tokenized input ids. Can be obtained using the `__call__` method.
- skip_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not to remove special tokens in the decoding.
- clean_up_tokenization_spaces (`bool`, *optional*):
- Whether or not to clean up the tokenization spaces. If `None`, will default to
- `self.clean_up_tokenization_spaces`.
- kwargs (additional keyword arguments, *optional*):
- Will be passed to the underlying model specific decode method.
- Returns:
- `str`: The decoded sentence.
- """
- # Convert inputs to python lists
- token_ids = to_py_obj(token_ids)
- return self._decode(
- token_ids=token_ids,
- skip_special_tokens=skip_special_tokens,
- clean_up_tokenization_spaces=clean_up_tokenization_spaces,
- **kwargs,
- )
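- # A minimal usage sketch for `decode` (checkpoint and ids are illustrative assumptions):
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
- #     tok.decode([101, 7592, 1010, 2088, 999, 102], skip_special_tokens=True)
- #     # -> "hello, world!" once special tokens are dropped and tokenization spaces are cleaned up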
- def _decode(
- self,
- token_ids: Union[int, List[int]],
- skip_special_tokens: bool = False,
- clean_up_tokenization_spaces: Optional[bool] = None,
- **kwargs,
- ) -> str:
- raise NotImplementedError
- def get_special_tokens_mask(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
- ) -> List[int]:
- """
- Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
- special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
- Args:
- token_ids_0 (`List[int]`):
- List of ids of the first sequence.
- token_ids_1 (`List[int]`, *optional*):
- List of ids of the second sequence.
- already_has_special_tokens (`bool`, *optional*, defaults to `False`):
- Whether or not the token list is already formatted with special tokens for the model.
- Returns:
- A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
- """
- assert already_has_special_tokens and token_ids_1 is None, (
- "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
- "Please use a slow (full python) tokenizer to activate this argument. "
- "Or set `return_special_tokens_mask=True` when calling the encoding method "
- "to get the special tokens mask in any tokenizer. "
- )
- all_special_ids = self.all_special_ids # cache the property
- special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
- return special_tokens_mask
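- # A minimal usage sketch for `get_special_tokens_mask` (checkpoint and ids are illustrative assumptions);
- # note that this base implementation requires `already_has_special_tokens=True`:
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
- #     tok.get_special_tokens_mask([101, 7592, 102], already_has_special_tokens=True)
- #     # -> [1, 0, 1] (1 marks special tokens such as [CLS]/[SEP], 0 marks regular tokens)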
- @staticmethod
- def clean_up_tokenization(out_string: str) -> str:
- """
- Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
- Args:
- out_string (`str`): The text to clean up.
- Returns:
- `str`: The cleaned-up string.
- """
- out_string = (
- out_string.replace(" .", ".")
- .replace(" ?", "?")
- .replace(" !", "!")
- .replace(" ,", ",")
- .replace(" ' ", "'")
- .replace(" n't", "n't")
- .replace(" 'm", "'m")
- .replace(" 's", "'s")
- .replace(" 've", "'ve")
- .replace(" 're", "'re")
- )
- return out_string
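- # A small sketch of `clean_up_tokenization` (it is a static method, so it can be called on the class):
- #
- #     from transformers import PreTrainedTokenizerBase
- #     PreTrainedTokenizerBase.clean_up_tokenization("do n't stop , please !")
- #     # -> "don't stop, please!"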
- def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
- """
- Depending on the input and internal state we might trigger a warning about a sequence that is too long for its
- corresponding model
- Args:
- ids (`List[int]`): The ids produced by the tokenization.
- max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
- verbose (`bool`): Whether or not to print more information and warnings.
- """
- if max_length is None and len(ids) > self.model_max_length and verbose:
- if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
- logger.warning(
- "Token indices sequence length is longer than the specified maximum sequence length "
- f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
- "will result in indexing errors"
- )
- self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
- def _switch_to_input_mode(self):
- """
- Private method to put the tokenizer in input mode (when it has different modes for input/outputs)
- """
- pass
- def _switch_to_target_mode(self):
- """
- Private method to put the tokenizer in target mode (when it has different modes for input/outputs)
- """
- pass
- @contextmanager
- def as_target_tokenizer(self):
- """
- Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
- sequence-to-sequence models that need slightly different processing for the labels.
- """
- warnings.warn(
- "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your "
- "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as "
- "your input texts if you use the same keyword arguments, or in a separate call)."
- )
- self._switch_to_target_mode()
- self._in_target_context_manager = True
- yield
- self._in_target_context_manager = False
- self._switch_to_input_mode()
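- # As the warning above says, `text_target` in `__call__` is the recommended replacement for this context
- # manager. A sketch (checkpoint name and texts are illustrative assumptions):
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("t5-small")
- #     model_inputs = tok("translate English to French: Hello", text_target="Bonjour")
- #     # model_inputs["labels"] holds the tokenized target, like the old `with tok.as_target_tokenizer():` block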
- @classmethod
- def register_for_auto_class(cls, auto_class="AutoTokenizer"):
- """
- Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
- library are already mapped with `AutoTokenizer`.
- <Tip warning={true}>
- This API is experimental and may have some slight breaking changes in the next releases.
- </Tip>
- Args:
- auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
- The auto class to register this new tokenizer with.
- """
- if not isinstance(auto_class, str):
- auto_class = auto_class.__name__
- import transformers.models.auto as auto_module
- if not hasattr(auto_module, auto_class):
- raise ValueError(f"{auto_class} is not a valid auto class.")
- cls._auto_class = auto_class
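- # A sketch of registering a custom tokenizer (the class below is a hypothetical example, not defined here):
- #
- #     from transformers import PreTrainedTokenizer
- #
- #     class MyTokenizer(PreTrainedTokenizer):
- #         ...  # custom implementation
- #
- #     MyTokenizer.register_for_auto_class("AutoTokenizer")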
- def prepare_seq2seq_batch(
- self,
- src_texts: List[str],
- tgt_texts: Optional[List[str]] = None,
- max_length: Optional[int] = None,
- max_target_length: Optional[int] = None,
- padding: str = "longest",
- return_tensors: Optional[str] = None,
- truncation: bool = True,
- **kwargs,
- ) -> BatchEncoding:
- """
- Prepare model inputs for translation. For best performance, translate one sentence at a time.
- Arguments:
- src_texts (`List[str]`):
- List of documents to summarize or source language texts.
- tgt_texts (`list`, *optional*):
- List of summaries or target language texts.
- max_length (`int`, *optional*):
- Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
- left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
- required by one of the truncation/padding parameters. If the model has no specific maximum input length
- (like XLNet), truncation/padding to a maximum length will be deactivated.
- max_target_length (`int`, *optional*):
- Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set
- to `None`, this will use the `max_length` value.
- padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `'longest'`):
- Activates and controls padding. Accepts the following values:
- - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
- sequence is provided).
- - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
- acceptable input length for the model if that argument is not provided.
- - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
- lengths).
- return_tensors (`str` or [`~utils.TensorType`], *optional*):
- If set, will return tensors instead of list of python integers. Acceptable values are:
- - `'tf'`: Return TensorFlow `tf.constant` objects.
- - `'pt'`: Return PyTorch `torch.Tensor` objects.
- - `'np'`: Return Numpy `np.ndarray` objects.
- truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
- Activates and controls truncation. Accepts the following values:
- - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
- to the maximum acceptable input length for the model if that argument is not provided. This will
- truncate token by token, removing a token from the longest sequence in the pair if a pair of
- sequences (or a batch of pairs) is provided.
- - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will only
- truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
- maximum acceptable input length for the model if that argument is not provided. This will only
- truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- - `False` or `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths
- greater than the model maximum admissible input size).
- **kwargs:
- Additional keyword arguments passed along to `self.__call__`.
- Return:
- [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- - **input_ids** -- List of token ids to be fed to the encoder.
- - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- - **labels** -- List of token ids for tgt_texts.
- The full set of keys `[input_ids, attention_mask, labels]` will only be returned if `tgt_texts` is passed.
- Otherwise, `input_ids` and `attention_mask` will be the only keys.
- """
- # docstyle-ignore
- formatted_warning = """
- `prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
- `__call__` method to prepare your inputs and targets.
- Here is a short example:
- model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)
- If you need to use different keyword arguments for the source and target texts, you should do two calls like
- this:
- model_inputs = tokenizer(src_texts, ...)
- labels = tokenizer(text_target=tgt_texts, ...)
- model_inputs["labels"] = labels["input_ids"]
- See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
- For a more complete example, see the implementation of `prepare_seq2seq_batch`.
- """
- warnings.warn(formatted_warning, FutureWarning)
- # mBART-specific kwargs that should be ignored by other models.
- kwargs.pop("src_lang", None)
- kwargs.pop("tgt_lang", None)
- if max_length is None:
- max_length = self.model_max_length
- model_inputs = self(
- src_texts,
- add_special_tokens=True,
- return_tensors=return_tensors,
- max_length=max_length,
- padding=padding,
- truncation=truncation,
- **kwargs,
- )
- if tgt_texts is None:
- return model_inputs
- # Process tgt_texts
- if max_target_length is None:
- max_target_length = max_length
- with self.as_target_tokenizer():
- labels = self(
- tgt_texts,
- add_special_tokens=True,
- return_tensors=return_tensors,
- padding=padding,
- max_length=max_target_length,
- truncation=truncation,
- **kwargs,
- )
- model_inputs["labels"] = labels["input_ids"]
- return model_inputs
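- # The recommended replacement spelled out in the warning above, as a sketch (checkpoint and texts are
- # illustrative assumptions):
- #
- #     from transformers import AutoTokenizer
- #     tok = AutoTokenizer.from_pretrained("t5-small")
- #     model_inputs = tok(["Translate me"], text_target=["Traduis-moi"], max_length=32, truncation=True)
- #     # model_inputs contains input_ids, attention_mask and labels, like prepare_seq2seq_batch used to return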
- def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
- """
- Get the tokenization file to use for this version of transformers.
- Args:
- tokenization_files (`List[str]`): The list of available tokenization files.
- Returns:
- `str`: The tokenization file to use.
- """
- tokenizer_files_map = {}
- for file_name in tokenization_files:
- search = _re_tokenizer_file.search(file_name)
- if search is not None:
- v = search.groups()[0]
- tokenizer_files_map[v] = file_name
- available_versions = sorted(tokenizer_files_map.keys())
- # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions.
- tokenizer_file = FULL_TOKENIZER_FILE
- transformers_version = version.parse(__version__)
- for v in available_versions:
- if version.parse(v) <= transformers_version:
- tokenizer_file = tokenizer_files_map[v]
- else:
- # No point going further since the versions are sorted.
- break
- return tokenizer_file
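- # A sketch of the version selection, assuming `_re_tokenizer_file` matches version-tagged names such as
- # "tokenizer.X.Y.Z.json" (that regex is defined elsewhere in this file):
- #
- #     get_fast_tokenizer_file(["tokenizer.json", "tokenizer.4.0.0.json"])
- #     # -> "tokenizer.4.0.0.json" when the installed transformers version is >= 4.0.0,
- #     #    otherwise the default FULL_TOKENIZER_FILE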
- # To update the docstring, we need to copy the method, otherwise we change the original docstring.
- PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
- if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None:
- PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
- object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
- )
|