- """Functional interface."""
- from typing import Callable, List, Optional, Tuple, Union
- import math
- import warnings
- import importlib
- try:
- import numpy as np
- except ModuleNotFoundError:
- np = None
- import torch
- from torch import _VF
- from torch import sym_int as _sym_int
- from torch._C import _infer_size, _add_docstr
- from torch._torch_docs import reproducibility_notes, tf32_notes, sparse_support_notes
- # A workaround to support both TorchScript and MyPy:
- from typing import TYPE_CHECKING
- if TYPE_CHECKING:
- from torch.types import _dtype as DType
- else:
- # The JIT doesn't understand Union, nor torch.dtype here
- DType = int
- from .._jit_internal import boolean_dispatch, _overload, BroadcastingList1, BroadcastingList2, BroadcastingList3
- from ..overrides import (
- has_torch_function, has_torch_function_unary, has_torch_function_variadic,
- handle_torch_function)
- from . import _reduction as _Reduction
- from . import grad # noqa: F401
- from .modules import utils
- from .modules.utils import _single, _pair, _triple, _list_with_default
- Tensor = torch.Tensor
conv1d = _add_docstr(
    torch.conv1d,
    r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 1D convolution over an input signal composed of several input
planes.

{tf32_note}

See :class:`~torch.nn.Conv1d` for details and output shape.

Note:
    {cudnn_reproducibility_note}

Note:
    This operator supports complex data types, i.e. ``complex32, complex64, complex128``.
""".format(
        **reproducibility_notes, **tf32_notes
    )
    + r"""

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)`
    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: ``None``
    stride: the stride of the convolving kernel. Can be a single number or
      a one-element tuple `(sW,)`. Default: 1
    padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
      single number or a one-element tuple `(padW,)`. Default: 0
      ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
      the input so the output has the same shape as the input. However, this mode
      doesn't support any stride values other than 1.

      .. warning::
          For ``padding='same'``, if the ``weight`` is even-length and
          ``dilation`` is odd in any dimension, a full :func:`pad` operation
          may be needed internally, lowering performance.

    dilation: the spacing between kernel elements. Can be a single number or
      a one-element tuple `(dW,)`. Default: 1
    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
      the number of groups. Default: 1

Examples::

    >>> inputs = torch.randn(33, 16, 30)
    >>> filters = torch.randn(20, 16, 5)
    >>> F.conv1d(inputs, filters)
""",
)
conv2d = _add_docstr(
    torch.conv2d,
    r"""
conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 2D convolution over an input image composed of several input
planes.

{tf32_note}

See :class:`~torch.nn.Conv2d` for details and output shape.

Note:
    {cudnn_reproducibility_note}

Note:
    This operator supports complex data types, i.e. ``complex32, complex64, complex128``.
""".format(
        **reproducibility_notes, **tf32_notes
    )
    + r"""

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
    bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None``
    stride: the stride of the convolving kernel. Can be a single number or a
      tuple `(sH, sW)`. Default: 1
    padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
      single number or a tuple `(padH, padW)`. Default: 0
      ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
      the input so the output has the same shape as the input. However, this mode
      doesn't support any stride values other than 1.

      .. warning::
          For ``padding='same'``, if the ``weight`` is even-length and
          ``dilation`` is odd in any dimension, a full :func:`pad` operation
          may be needed internally, lowering performance.

    dilation: the spacing between kernel elements. Can be a single number or
      a tuple `(dH, dW)`. Default: 1
    groups: split input into groups, both :math:`\text{in\_channels}` and :math:`\text{out\_channels}`
      should be divisible by the number of groups. Default: 1

Examples::

    >>> # With square kernels and equal stride
    >>> filters = torch.randn(8, 4, 3, 3)
    >>> inputs = torch.randn(1, 4, 5, 5)
    >>> F.conv2d(inputs, filters, padding=1)
""",
)  # noqa: E501
conv3d = _add_docstr(
    torch.conv3d,
    r"""
conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 3D convolution over an input image composed of several input
planes.

{tf32_note}

See :class:`~torch.nn.Conv3d` for details and output shape.

Note:
    {cudnn_reproducibility_note}

Note:
    This operator supports complex data types, i.e. ``complex32, complex64, complex128``.
""".format(
        **reproducibility_notes, **tf32_notes
    )
    + r"""

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
    weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)`
    bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: None
    stride: the stride of the convolving kernel. Can be a single number or a
      tuple `(sT, sH, sW)`. Default: 1
    padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
      single number or a tuple `(padT, padH, padW)`. Default: 0
      ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
      the input so the output has the same shape as the input. However, this mode
      doesn't support any stride values other than 1.

      .. warning::
          For ``padding='same'``, if the ``weight`` is even-length and
          ``dilation`` is odd in any dimension, a full :func:`pad` operation
          may be needed internally, lowering performance.

    dilation: the spacing between kernel elements. Can be a single number or
      a tuple `(dT, dH, dW)`. Default: 1
    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
      the number of groups. Default: 1

Examples::

    >>> filters = torch.randn(33, 16, 3, 3, 3)
    >>> inputs = torch.randn(20, 16, 50, 10, 20)
    >>> F.conv3d(inputs, filters)
""",
)  # noqa: E501
conv_transpose1d = _add_docstr(
    torch.conv_transpose1d,
    r"""
conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor

Applies a 1D transposed convolution operator over an input signal
composed of several input planes, sometimes also called "deconvolution".

{tf32_note}

See :class:`~torch.nn.ConvTranspose1d` for details and output shape.

Note:
    {cudnn_reproducibility_note}
""".format(
        **reproducibility_notes, **tf32_notes
    )
    + r"""

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)`
    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
    stride: the stride of the convolving kernel. Can be a single number or a
      tuple ``(sW,)``. Default: 1
    padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
      sides of each dimension in the input. Can be a single number or a tuple
      ``(padW,)``. Default: 0
    output_padding: additional size added to one side of each dimension in the
      output shape. Can be a single number or a tuple ``(out_padW,)``. Default: 0
    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
      number of groups. Default: 1
    dilation: the spacing between kernel elements. Can be a single number or
      a tuple ``(dW,)``. Default: 1

Examples::

    >>> inputs = torch.randn(20, 16, 50)
    >>> weights = torch.randn(16, 33, 5)
    >>> F.conv_transpose1d(inputs, weights)
""",
)
conv_transpose2d = _add_docstr(
    torch.conv_transpose2d,
    r"""
conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor

Applies a 2D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".

{tf32_note}

See :class:`~torch.nn.ConvTranspose2d` for details and output shape.

Note:
    {cudnn_reproducibility_note}
""".format(
        **reproducibility_notes, **tf32_notes
    )
    + r"""

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)`
    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
    stride: the stride of the convolving kernel. Can be a single number or a
      tuple ``(sH, sW)``. Default: 1
    padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
      sides of each dimension in the input. Can be a single number or a tuple
      ``(padH, padW)``. Default: 0
    output_padding: additional size added to one side of each dimension in the
      output shape. Can be a single number or a tuple ``(out_padH, out_padW)``.
      Default: 0
    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
      number of groups. Default: 1
    dilation: the spacing between kernel elements. Can be a single number or
      a tuple ``(dH, dW)``. Default: 1

Examples::

    >>> # With square kernels and equal stride
    >>> inputs = torch.randn(1, 4, 5, 5)
    >>> weights = torch.randn(4, 8, 3, 3)
    >>> F.conv_transpose2d(inputs, weights, padding=1)
""",
)  # noqa: E501
conv_transpose3d = _add_docstr(
    torch.conv_transpose3d,
    r"""
conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor

Applies a 3D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".

{tf32_note}

See :class:`~torch.nn.ConvTranspose3d` for details and output shape.

Note:
    {cudnn_reproducibility_note}
""".format(
        **reproducibility_notes, **tf32_notes
    )
    + r"""

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
    weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)`
    bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
    stride: the stride of the convolving kernel. Can be a single number or a
      tuple ``(sT, sH, sW)``. Default: 1
    padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
      sides of each dimension in the input. Can be a single number or a tuple
      ``(padT, padH, padW)``. Default: 0
    output_padding: additional size added to one side of each dimension in the
      output shape. Can be a single number or a tuple
      ``(out_padT, out_padH, out_padW)``. Default: 0
    groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
      number of groups. Default: 1
    dilation: the spacing between kernel elements. Can be a single number or
      a tuple ``(dT, dH, dW)``. Default: 1

Examples::

    >>> inputs = torch.randn(20, 16, 50, 10, 20)
    >>> weights = torch.randn(16, 33, 3, 3, 3)
    >>> F.conv_transpose3d(inputs, weights)
""",
)  # noqa: E501
conv_tbc = _add_docstr(
    torch.conv_tbc,
    r"""
Applies a 1-dimensional sequence convolution over an input sequence.
Input and output dimensions are (Time, Batch, Channels) - hence TBC.

Args:
    input: input tensor of shape :math:`(\text{sequence length} \times \text{batch} \times \text{in\_channels})`
    weight: filter of shape (:math:`\text{kernel width} \times \text{in\_channels} \times \text{out\_channels}`)
    bias: bias of shape (:math:`\text{out\_channels}`)
    pad: number of timesteps to pad. Default: 0
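
Examples::

    >>> # a minimal sketch; shapes follow the (time, batch, channels) layout above
    >>> inputs = torch.randn(10, 2, 16)    # (time, batch, in_channels)
    >>> weight = torch.randn(3, 16, 32)    # (kernel width, in_channels, out_channels)
    >>> bias = torch.randn(32)
    >>> F.conv_tbc(inputs, weight, bias)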
- """,
- )
# Pooling
avg_pool1d = _add_docstr(
    torch.avg_pool1d,
    r"""
avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor

Applies a 1D average pooling over an input signal composed of several
input planes.

See :class:`~torch.nn.AvgPool1d` for details and output shape.

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    kernel_size: the size of the window. Can be a single number or a
      tuple `(kW,)`
    stride: the stride of the window. Can be a single number or a tuple
      `(sW,)`. Default: :attr:`kernel_size`
    padding: implicit zero paddings on both sides of the input. Can be a
      single number or a tuple `(padW,)`. Default: 0
    ceil_mode: when True, will use `ceil` instead of `floor` to compute the
      output shape. Default: ``False``
    count_include_pad: when True, will include the zero-padding in the
      averaging calculation. Default: ``True``

Examples::

    >>> # pool of square window of size=3, stride=2
    >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
    >>> F.avg_pool1d(input, kernel_size=3, stride=2)
    tensor([[[ 2., 4., 6.]]])
""",
)
avg_pool2d = _add_docstr(
    torch._C._nn.avg_pool2d,
    r"""
avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor

Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
:math:`sH \times sW` steps. The number of output features is equal to the number of
input planes.

See :class:`~torch.nn.AvgPool2d` for details and output shape.

Args:
    input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    kernel_size: size of the pooling region. Can be a single number or a
      tuple `(kH, kW)`
    stride: stride of the pooling operation. Can be a single number or a
      tuple `(sH, sW)`. Default: :attr:`kernel_size`
    padding: implicit zero paddings on both sides of the input. Can be a
      single number or a tuple `(padH, padW)`. Default: 0
    ceil_mode: when True, will use `ceil` instead of `floor` in the formula
      to compute the output shape. Default: ``False``
    count_include_pad: when True, will include the zero-padding in the
      averaging calculation. Default: ``True``
    divisor_override: if specified, it will be used as divisor, otherwise
      size of the pooling region will be used. Default: None
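
Examples::

    >>> # a minimal sketch: average-pool a 4D input with a 2x2 window (shapes illustrative)
    >>> input = torch.randn(1, 3, 8, 8)
    >>> F.avg_pool2d(input, kernel_size=2)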
- """,
- )
avg_pool3d = _add_docstr(
    torch._C._nn.avg_pool3d,
    r"""
avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor

Applies 3D average-pooling operation in :math:`kT \times kH \times kW` regions by step
size :math:`sT \times sH \times sW` steps. The number of output features is equal to
the number of input planes.

See :class:`~torch.nn.AvgPool3d` for details and output shape.

Args:
    input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
    kernel_size: size of the pooling region. Can be a single number or a
      tuple `(kT, kH, kW)`
    stride: stride of the pooling operation. Can be a single number or a
      tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
    padding: implicit zero paddings on both sides of the input. Can be a
      single number or a tuple `(padT, padH, padW)`. Default: 0
    ceil_mode: when True, will use `ceil` instead of `floor` in the formula
      to compute the output shape. Default: ``False``
    count_include_pad: when True, will include the zero-padding in the
      averaging calculation. Default: ``True``
    divisor_override: if specified, it will be used as divisor, otherwise
      size of the pooling region will be used. Default: None
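
Examples::

    >>> # a minimal sketch: average-pool a 5D input with a cubic 2x2x2 window (shapes illustrative)
    >>> input = torch.randn(1, 3, 8, 8, 8)
    >>> F.avg_pool3d(input, kernel_size=2)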
- """,
- )
def fractional_max_pool2d_with_indices(
    input: Tensor, kernel_size: BroadcastingList2[int],
    output_size: Optional[BroadcastingList2[int]] = None,
    output_ratio: Optional[BroadcastingList2[float]] = None,
    return_indices: bool = False,
    _random_samples: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor]:  # noqa: D400
    r"""
    fractional_max_pool2d(input, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None)

    Applies 2D fractional max pooling over an input signal composed of several input planes.

    Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham.

    The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
    step size determined by the target output size.
    The number of output features is equal to the number of input planes.

    Args:
        kernel_size: the size of the window to take a max over.
            Can be a single number :math:`k` (for a square kernel of :math:`k \times k`)
            or a tuple `(kH, kW)`
        output_size: the target output size of the image of the form :math:`oH \times oW`.
            Can be a tuple `(oH, oW)` or a single number :math:`oH` for a square image :math:`oH \times oH`
        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
            This has to be a number or tuple in the range (0, 1)
        return_indices: if ``True``, will return the indices along with the outputs.
            Useful to pass to :func:`~torch.nn.functional.max_unpool2d`.

    Examples::

        >>> input = torch.randn(20, 16, 50, 32)
        >>> # pool of square window of size=3, and target output size 13x12
        >>> F.fractional_max_pool2d(input, 3, output_size=(13, 12))
        >>> # pool of square window and target output size being half of input image size
        >>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5))

    .. _Fractional MaxPooling:
        http://arxiv.org/abs/1412.6071
    """
    if has_torch_function_variadic(input, _random_samples):
        return handle_torch_function(
            fractional_max_pool2d_with_indices,
            (input, _random_samples),
            input,
            kernel_size,
            output_size=output_size,
            output_ratio=output_ratio,
            return_indices=return_indices,
            _random_samples=_random_samples,
        )
    if output_size is None and output_ratio is None:
        raise ValueError("fractional_max_pool2d requires specifying either an output_size or an output_ratio")
    if output_size is None:
        assert output_ratio is not None
        if len(output_ratio) > 2:
            raise ValueError("fractional_max_pool2d requires output_ratio to be either a single float or a tuple of two floats.")
        _output_ratio = _pair(output_ratio)
        output_size = [int(input.size(-2) * _output_ratio[0]), int(input.size(-1) * _output_ratio[1])]
    if _random_samples is None:
        n_batch = 1 if input.dim() == 3 else input.size(0)
        _random_samples = torch.rand(n_batch, input.size(-3), 2, dtype=input.dtype, device=input.device)
    return torch._C._nn.fractional_max_pool2d(input, kernel_size, output_size, _random_samples)
def _fractional_max_pool2d(
    input: Tensor, kernel_size: BroadcastingList2[int],
    output_size: Optional[BroadcastingList2[int]] = None,
    output_ratio: Optional[BroadcastingList2[float]] = None,
    return_indices: bool = False,
    _random_samples: Optional[Tensor] = None
) -> Tensor:
    if has_torch_function_variadic(input, _random_samples):
        return handle_torch_function(
            fractional_max_pool2d,
            (input, _random_samples),
            input,
            kernel_size,
            output_size=output_size,
            output_ratio=output_ratio,
            return_indices=return_indices,
            _random_samples=_random_samples,
        )
    return fractional_max_pool2d_with_indices(
        input, kernel_size, output_size, output_ratio, return_indices, _random_samples
    )[0]


fractional_max_pool2d = boolean_dispatch(
    arg_name="return_indices",
    arg_index=4,
    default=False,
    if_true=fractional_max_pool2d_with_indices,
    if_false=_fractional_max_pool2d,
    module_name=__name__,
    func_name="fractional_max_pool2d",
)
def fractional_max_pool3d_with_indices(
    input: Tensor, kernel_size: BroadcastingList3[int],
    output_size: Optional[BroadcastingList3[int]] = None,
    output_ratio: Optional[BroadcastingList3[float]] = None,
    return_indices: bool = False,
    _random_samples: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor]:  # noqa: D400
    r"""
    fractional_max_pool3d(input, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None)

    Applies 3D fractional max pooling over an input signal composed of several input planes.

    Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham.

    The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
    step size determined by the target output size.
    The number of output features is equal to the number of input planes.

    Args:
        kernel_size: the size of the window to take a max over.
            Can be a single number :math:`k` (for a square kernel of :math:`k \times k \times k`)
            or a tuple `(kT, kH, kW)`
        output_size: the target output size of the form :math:`oT \times oH \times oW`.
            Can be a tuple `(oT, oH, oW)` or a single number :math:`oH` for a cubic output
            :math:`oH \times oH \times oH`
        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
            This has to be a number or tuple in the range (0, 1)
        return_indices: if ``True``, will return the indices along with the outputs.
            Useful to pass to :func:`~torch.nn.functional.max_unpool3d`.

    Shape:
        - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
          :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
          :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`

    Examples::

        >>> input = torch.randn(20, 16, 50, 32, 16)
        >>> # pool of cubic window of size=3, and target output size 13x12x11
        >>> F.fractional_max_pool3d(input, 3, output_size=(13, 12, 11))
        >>> # pool of cubic window and target output size being half of input size
        >>> F.fractional_max_pool3d(input, 3, output_ratio=(0.5, 0.5, 0.5))

    .. _Fractional MaxPooling:
        http://arxiv.org/abs/1412.6071
    """
    if has_torch_function_variadic(input, _random_samples):
        return handle_torch_function(
            fractional_max_pool3d_with_indices,
            (input, _random_samples),
            input,
            kernel_size,
            output_size=output_size,
            output_ratio=output_ratio,
            return_indices=return_indices,
            _random_samples=_random_samples,
        )
    if output_size is None and output_ratio is None:
        raise ValueError("fractional_max_pool3d requires specifying either an output_size or an output_ratio")
    if output_size is None:
        assert output_ratio is not None
        _output_ratio = _triple(output_ratio)
        output_size = [
            int(input.size(-3) * _output_ratio[0]),
            int(input.size(-2) * _output_ratio[1]),
            int(input.size(-1) * _output_ratio[2]),
        ]
    if _random_samples is None:
        n_batch = 1 if input.dim() == 4 else input.size(0)
        _random_samples = torch.rand(n_batch, input.size(-4), 3, dtype=input.dtype, device=input.device)
    return torch._C._nn.fractional_max_pool3d(input, kernel_size, output_size, _random_samples)
def _fractional_max_pool3d(
    input: Tensor, kernel_size: BroadcastingList3[int],
    output_size: Optional[BroadcastingList3[int]] = None,
    output_ratio: Optional[BroadcastingList3[float]] = None,
    return_indices: bool = False,
    _random_samples: Optional[Tensor] = None
) -> Tensor:
    if has_torch_function_variadic(input, _random_samples):
        return handle_torch_function(
            fractional_max_pool3d,
            (input, _random_samples),
            input,
            kernel_size,
            output_size=output_size,
            output_ratio=output_ratio,
            return_indices=return_indices,
            _random_samples=_random_samples,
        )
    return fractional_max_pool3d_with_indices(
        input, kernel_size, output_size, output_ratio, return_indices, _random_samples
    )[0]


fractional_max_pool3d = boolean_dispatch(
    arg_name="return_indices",
    arg_index=4,
    default=False,
    if_true=fractional_max_pool3d_with_indices,
    if_false=_fractional_max_pool3d,
    module_name=__name__,
    func_name="fractional_max_pool3d",
)
def max_pool1d_with_indices(
    input: Tensor, kernel_size: BroadcastingList1[int],
    stride: Optional[BroadcastingList1[int]] = None,
    padding: BroadcastingList1[int] = 0,
    dilation: BroadcastingList1[int] = 1,
    ceil_mode: bool = False,
    return_indices: bool = False
) -> Tuple[Tensor, Tensor]:  # noqa: D400
    r"""
    max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)

    Applies a 1D max pooling over an input signal composed of several input
    planes.

    .. note::
        The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
        what is seen in :class:`~torch.nn.MaxPool1d`, and will change in a future release.

    See :class:`~torch.nn.MaxPool1d` for details.

    Args:
        input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`, minibatch dim optional.
        kernel_size: the size of the window. Can be a single number or a
            tuple `(kW,)`
        stride: the stride of the window. Can be a single number or a tuple
            `(sW,)`. Default: :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
        dilation: The stride between elements within a sliding window, must be > 0.
        ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
            ensures that every element in the input tensor is covered by a sliding window.
        return_indices: If ``True``, will return the argmax along with the max values.
            Useful for :func:`torch.nn.functional.max_unpool1d` later
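
    Examples::

        >>> # a minimal sketch: max-pool a 1D signal with a window of 3, stride 2 (shapes illustrative)
        >>> input = torch.randn(1, 4, 16)
        >>> F.max_pool1d(input, kernel_size=3, stride=2)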
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_pool1d_with_indices,
- (input,),
- input,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- ceil_mode=ceil_mode,
- return_indices=return_indices,
- )
- if stride is None:
- stride = torch.jit.annotate(List[int], [])
- return torch.max_pool1d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
- def _max_pool1d(
- input: Tensor, kernel_size: BroadcastingList1[int],
- stride: Optional[BroadcastingList1[int]] = None,
- padding: BroadcastingList1[int] = 0,
- dilation: BroadcastingList1[int] = 1,
- ceil_mode: bool = False,
- return_indices: bool = False
- ) -> Tensor:
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_pool1d,
- (input,),
- input,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- ceil_mode=ceil_mode,
- return_indices=return_indices,
- )
- if stride is None:
- stride = torch.jit.annotate(List[int], [])
- return torch.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode)
- max_pool1d = boolean_dispatch(
- arg_name="return_indices",
- arg_index=6,
- default=False,
- if_true=max_pool1d_with_indices,
- if_false=_max_pool1d,
- module_name=__name__,
- func_name="max_pool1d",
- )
def max_pool2d_with_indices(
    input: Tensor, kernel_size: BroadcastingList2[int],
    stride: Optional[BroadcastingList2[int]] = None,
    padding: BroadcastingList2[int] = 0,
    dilation: BroadcastingList2[int] = 1,
    ceil_mode: bool = False,
    return_indices: bool = False
) -> Tuple[Tensor, Tensor]:  # noqa: D400
    r"""
    max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)

    Applies a 2D max pooling over an input signal composed of several input
    planes.

    .. note::
        The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
        what is seen in :class:`~torch.nn.MaxPool2d`, and will change in a future release.

    See :class:`~torch.nn.MaxPool2d` for details.

    Args:
        input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`, minibatch dim optional.
        kernel_size: size of the pooling region. Can be a single number or a
            tuple `(kH, kW)`
        stride: stride of the pooling operation. Can be a single number or a
            tuple `(sH, sW)`. Default: :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
        dilation: The stride between elements within a sliding window, must be > 0.
        ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
            ensures that every element in the input tensor is covered by a sliding window.
        return_indices: If ``True``, will return the argmax along with the max values.
            Useful for :func:`torch.nn.functional.max_unpool2d` later
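
    Examples::

        >>> # a minimal sketch: max-pool a 4D input with a 2x2 window (shapes illustrative)
        >>> input = torch.randn(1, 3, 8, 8)
        >>> F.max_pool2d(input, kernel_size=2)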
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_pool2d_with_indices,
- (input,),
- input,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- ceil_mode=ceil_mode,
- return_indices=return_indices,
- )
- if stride is None:
- stride = torch.jit.annotate(List[int], [])
- return torch._C._nn.max_pool2d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
- def _max_pool2d(
- input: Tensor, kernel_size: BroadcastingList2[int],
- stride: Optional[BroadcastingList2[int]] = None,
- padding: BroadcastingList2[int] = 0,
- dilation: BroadcastingList2[int] = 1,
- ceil_mode: bool = False,
- return_indices: bool = False
- ) -> Tensor:
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_pool2d,
- (input,),
- input,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- ceil_mode=ceil_mode,
- return_indices=return_indices,
- )
- if stride is None:
- stride = torch.jit.annotate(List[int], [])
- return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
- max_pool2d = boolean_dispatch(
- arg_name="return_indices",
- arg_index=6,
- default=False,
- if_true=max_pool2d_with_indices,
- if_false=_max_pool2d,
- module_name=__name__,
- func_name="max_pool2d",
- )
def max_pool3d_with_indices(
    input: Tensor, kernel_size: BroadcastingList3[int],
    stride: Optional[BroadcastingList3[int]] = None,
    padding: BroadcastingList3[int] = 0,
    dilation: BroadcastingList3[int] = 1,
    ceil_mode: bool = False,
    return_indices: bool = False
) -> Tuple[Tensor, Tensor]:  # noqa: D400
    r"""
    max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)

    Applies a 3D max pooling over an input signal composed of several input
    planes.

    .. note::
        The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
        what is seen in :class:`~torch.nn.MaxPool3d`, and will change in a future release.

    See :class:`~torch.nn.MaxPool3d` for details.

    Args:
        input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD, iH , iW)`, minibatch dim optional.
        kernel_size: size of the pooling region. Can be a single number or a
            tuple `(kT, kH, kW)`
        stride: stride of the pooling operation. Can be a single number or a
            tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
        dilation: The stride between elements within a sliding window, must be > 0.
        ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
            ensures that every element in the input tensor is covered by a sliding window.
        return_indices: If ``True``, will return the argmax along with the max values.
            Useful for :func:`torch.nn.functional.max_unpool3d` later
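
    Examples::

        >>> # a minimal sketch: max-pool a 5D input with a cubic window of 2 (shapes illustrative)
        >>> input = torch.randn(1, 3, 8, 8, 8)
        >>> F.max_pool3d(input, kernel_size=2)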
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_pool3d_with_indices,
- (input,),
- input,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- ceil_mode=ceil_mode,
- return_indices=return_indices,
- )
- if stride is None:
- stride = torch.jit.annotate(List[int], [])
- return torch._C._nn.max_pool3d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
- def _max_pool3d(
- input: Tensor, kernel_size: BroadcastingList3[int],
- stride: Optional[BroadcastingList3[int]] = None,
- padding: BroadcastingList3[int] = 0,
- dilation: BroadcastingList3[int] = 1,
- ceil_mode: bool = False,
- return_indices: bool = False
- ) -> Tensor:
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_pool3d,
- (input,),
- input,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- ceil_mode=ceil_mode,
- return_indices=return_indices,
- )
- if stride is None:
- stride = torch.jit.annotate(List[int], [])
- return torch.max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode)
- max_pool3d = boolean_dispatch(
- arg_name="return_indices",
- arg_index=6,
- default=False,
- if_true=max_pool3d_with_indices,
- if_false=_max_pool3d,
- module_name=__name__,
- func_name="max_pool3d",
- )
def _unpool_output_size(
    input: Tensor, kernel_size: List[int], stride: List[int], padding: List[int], output_size: Optional[List[int]]
) -> List[int]:
    input_size = input.size()
    default_size = torch.jit.annotate(List[int], [])
    # The default output size inverts the pooling shape formula:
    # out = (in - 1) * stride + kernel_size - 2 * padding
    for d in range(len(kernel_size)):
        default_size.append((input_size[-len(kernel_size) + d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d])
    if output_size is None:
        ret = default_size
    else:
        if len(output_size) == len(kernel_size) + 2:
            output_size = output_size[2:]
        if len(output_size) != len(kernel_size):
            raise ValueError(
                "output_size should be a sequence containing "
                f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'"
            )
        for d in range(len(kernel_size)):
            min_size = default_size[d] - stride[d]
            max_size = default_size[d] + stride[d]
            if not (min_size < output_size[d] < max_size):
                raise ValueError(
                    f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})'
                )
        ret = output_size
    return ret
def max_unpool1d(
    input: Tensor, indices: Tensor,
    kernel_size: BroadcastingList1[int],
    stride: Optional[BroadcastingList1[int]] = None,
    padding: BroadcastingList1[int] = 0,
    output_size: Optional[BroadcastingList1[int]] = None
) -> Tensor:
    r"""Compute a partial inverse of :class:`MaxPool1d`.

    See :class:`~torch.nn.MaxUnpool1d` for details.
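
    Examples::

        >>> # a minimal sketch: pool with indices, then unpool to the original length
        >>> input = torch.tensor([[[1., 2., 3., 4., 5., 6., 7., 8.]]])
        >>> output, indices = F.max_pool1d(input, kernel_size=2, return_indices=True)
        >>> F.max_unpool1d(output, indices, kernel_size=2)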
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_unpool1d,
- (input,),
- input,
- indices,
- kernel_size,
- stride=stride,
- padding=padding,
- output_size=output_size,
- )
- kernel_size = _single(kernel_size)
- if stride is not None:
- _stride = _single(stride)
- else:
- _stride = kernel_size
- padding = _single(padding)
- output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
- if isinstance(output_size, list):
- output_size = output_size + [1]
- else:
- output_size = output_size + (1,)
- return torch._C._nn.max_unpool2d(input.unsqueeze(-1), indices.unsqueeze(-1), output_size).squeeze(-1)
def max_unpool2d(
    input: Tensor, indices: Tensor,
    kernel_size: BroadcastingList2[int],
    stride: Optional[BroadcastingList2[int]] = None,
    padding: BroadcastingList2[int] = 0,
    output_size: Optional[BroadcastingList2[int]] = None
) -> Tensor:
    r"""Compute a partial inverse of :class:`MaxPool2d`.

    See :class:`~torch.nn.MaxUnpool2d` for details.
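
    Examples::

        >>> # a minimal sketch: pool with indices, then unpool to the original spatial size
        >>> input = torch.randn(1, 1, 4, 4)
        >>> output, indices = F.max_pool2d(input, kernel_size=2, return_indices=True)
        >>> F.max_unpool2d(output, indices, kernel_size=2)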
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_unpool2d,
- (input,),
- input,
- indices,
- kernel_size,
- stride=stride,
- padding=padding,
- output_size=output_size,
- )
- kernel_size = _pair(kernel_size)
- if stride is not None:
- _stride = _pair(stride)
- else:
- _stride = kernel_size
- padding = _pair(padding)
- output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
- return torch._C._nn.max_unpool2d(input, indices, output_size)
def max_unpool3d(
    input: Tensor, indices: Tensor,
    kernel_size: BroadcastingList3[int],
    stride: Optional[BroadcastingList3[int]] = None,
    padding: BroadcastingList3[int] = 0,
    output_size: Optional[BroadcastingList3[int]] = None
) -> Tensor:
    r"""Compute a partial inverse of :class:`MaxPool3d`.

    See :class:`~torch.nn.MaxUnpool3d` for details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- max_unpool3d,
- (input,),
- input,
- indices,
- kernel_size,
- stride=stride,
- padding=padding,
- output_size=output_size,
- )
- kernel_size = _triple(kernel_size)
- if stride is not None:
- _stride = _triple(stride)
- else:
- _stride = kernel_size
- padding = _triple(padding)
- output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
- return torch._C._nn.max_unpool3d(input, indices, output_size, _stride, padding)
- def lp_pool3d(
- input: Tensor, norm_type: Union[int, float],
- kernel_size: BroadcastingList3[int],
- stride: Optional[BroadcastingList3[int]] = None,
- ceil_mode: bool = False
- ) -> Tensor:
- r"""
- Apply a 3D power-average pooling over an input signal composed of several input planes.
- If the sum of all inputs to the power of `p` is
- zero, the gradient is set to zero as well.
- See :class:`~torch.nn.LPPool3d` for details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- lp_pool3d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode
- )
- kd, kw, kh = utils._triple(kernel_size)
- if stride is not None:
- out = avg_pool3d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
- else:
- out = avg_pool3d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
- return (torch.sign(out) * relu(torch.abs(out))).mul(kd * kw * kh).pow(1.0 / norm_type)
- def lp_pool2d(
- input: Tensor, norm_type: Union[int, float],
- kernel_size: BroadcastingList2[int],
- stride: Optional[BroadcastingList2[int]] = None,
- ceil_mode: bool = False
- ) -> Tensor:
- r"""
- Apply a 2D power-average pooling over an input signal composed of several input planes.
- If the sum of all inputs to the power of `p` is
- zero, the gradient is set to zero as well.
- See :class:`~torch.nn.LPPool2d` for details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- lp_pool2d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode
- )
- kw, kh = utils._pair(kernel_size)
- if stride is not None:
- out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
- else:
- out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
- return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type)
- def lp_pool1d(
- input: Tensor, norm_type: Union[int, float],
- kernel_size: int,
- stride: Optional[BroadcastingList1[int]] = None,
- ceil_mode: bool = False
- ) -> Tensor:
- r"""Apply a 1D power-average pooling over an input signal composed of several input planes.
- If the sum of all inputs to the power of `p` is
- zero, the gradient is set to zero as well.
- See :class:`~torch.nn.LPPool1d` for details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- lp_pool1d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode
- )
- if stride is not None:
- out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
- else:
- out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
- return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type)
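- # Editor's note: all three lp_pool variants compute the same identity,
- #   lp_pool(x, p, k) == (avg_pool(x ** p, k) * k) ** (1 / p),
- # with the sign/relu combination above only guarding the zero-sum gradient
- # case mentioned in the docstrings. Illustrative check (sketch):
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> x = torch.rand(1, 1, 8)
- #   >>> a = F.lp_pool1d(x, norm_type=2, kernel_size=2)
- #   >>> b = (F.avg_pool1d(x.pow(2), 2) * 2).pow(0.5)
- #   >>> torch.allclose(a, b)
- #   True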
- def adaptive_max_pool1d_with_indices(
- input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False
- ) -> Tuple[Tensor, Tensor]: # noqa: D400
- r"""
- adaptive_max_pool1d(input, output_size, return_indices=False)
- Applies a 1D adaptive max pooling over an input signal composed of
- several input planes.
- See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
- Args:
- output_size: the target output size (single integer)
- return_indices: whether to return pooling indices. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices
- )
- return torch.adaptive_max_pool1d(input, output_size)
- def _adaptive_max_pool1d(input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False) -> Tensor:
- if has_torch_function_unary(input):
- return handle_torch_function(
- adaptive_max_pool1d, (input,), input, output_size, return_indices=return_indices
- )
- return adaptive_max_pool1d_with_indices(input, output_size)[0]
- adaptive_max_pool1d = boolean_dispatch(
- arg_name="return_indices",
- arg_index=2,
- default=False,
- if_true=adaptive_max_pool1d_with_indices,
- if_false=_adaptive_max_pool1d,
- module_name=__name__,
- func_name="adaptive_max_pool1d",
- )
- def adaptive_max_pool2d_with_indices(
- input: Tensor, output_size: BroadcastingList2[int],
- return_indices: bool = False
- ) -> Tuple[Tensor, Tensor]: # noqa: D400
- r"""adaptive_max_pool2d(input, output_size, return_indices=False)
- Applies a 2D adaptive max pooling over an input signal composed of
- several input planes.
- See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
- Args:
- output_size: the target output size (single integer or
- double-integer tuple)
- return_indices: whether to return pooling indices. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- adaptive_max_pool2d_with_indices, (input,), input, output_size, return_indices=return_indices
- )
- output_size = _list_with_default(output_size, input.size())
- return torch._C._nn.adaptive_max_pool2d(input, output_size)
- def _adaptive_max_pool2d(input: Tensor, output_size: BroadcastingList2[int], return_indices: bool = False) -> Tensor:
- if has_torch_function_unary(input):
- return handle_torch_function(
- adaptive_max_pool2d, (input,), input, output_size, return_indices=return_indices
- )
- return adaptive_max_pool2d_with_indices(input, output_size)[0]
- adaptive_max_pool2d = boolean_dispatch(
- arg_name="return_indices",
- arg_index=2,
- default=False,
- if_true=adaptive_max_pool2d_with_indices,
- if_false=_adaptive_max_pool2d,
- module_name=__name__,
- func_name="adaptive_max_pool2d",
- )
- def adaptive_max_pool3d_with_indices(
- input: Tensor, output_size: BroadcastingList3[int],
- return_indices: bool = False
- ) -> Tuple[Tensor, Tensor]: # noqa: D400
- r"""
- adaptive_max_pool3d(input, output_size, return_indices=False)
- Applies a 3D adaptive max pooling over an input signal composed of
- several input planes.
- See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.
- Args:
- output_size: the target output size (single integer or
- triple-integer tuple)
- return_indices: whether to return pooling indices. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- adaptive_max_pool3d_with_indices, (input,), input, output_size, return_indices=return_indices
- )
- output_size = _list_with_default(output_size, input.size())
- return torch._C._nn.adaptive_max_pool3d(input, output_size)
- def _adaptive_max_pool3d(input: Tensor, output_size: BroadcastingList3[int], return_indices: bool = False) -> Tensor:
- if has_torch_function_unary(input):
- return handle_torch_function(
- adaptive_max_pool3d, (input,), input, output_size, return_indices=return_indices
- )
- return adaptive_max_pool3d_with_indices(input, output_size)[0]
- adaptive_max_pool3d = boolean_dispatch(
- arg_name="return_indices",
- arg_index=2,
- default=False,
- if_true=adaptive_max_pool3d_with_indices,
- if_false=_adaptive_max_pool3d,
- module_name=__name__,
- func_name="adaptive_max_pool3d",
- )
- adaptive_avg_pool1d = _add_docstr(
- torch.adaptive_avg_pool1d,
- r"""
- adaptive_avg_pool1d(input, output_size) -> Tensor
- Applies a 1D adaptive average pooling over an input signal composed of
- several input planes.
- See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.
- Args:
- output_size: the target output size (single integer)
- """,
- )
- def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
- r"""Apply a 2D adaptive average pooling over an input signal composed of several input planes.
- See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
- Args:
- output_size: the target output size (single integer or
- double-integer tuple)
- """
- if has_torch_function_unary(input):
- return handle_torch_function(adaptive_avg_pool2d, (input,), input, output_size)
- _output_size = _list_with_default(output_size, input.size())
- return torch._C._nn.adaptive_avg_pool2d(input, _output_size)
- def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList3[int]) -> Tensor:
- r"""Apply a 3D adaptive average pooling over an input signal composed of several input planes.
- See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape.
- Args:
- output_size: the target output size (single integer or
- triple-integer tuple)
- """
- if has_torch_function_unary(input):
- return handle_torch_function(adaptive_avg_pool3d, (input,), input, output_size)
- _output_size = _list_with_default(output_size, input.size())
- return torch._C._nn.adaptive_avg_pool3d(input, _output_size)
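- # Editor's sketch (illustrative): adaptive pooling fixes the *output* size
- # and derives the window geometry from the input, so differently sized
- # inputs map to one shape.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> F.adaptive_avg_pool2d(torch.randn(1, 3, 17, 31), (5, 5)).shape
- #   torch.Size([1, 3, 5, 5])
- #   >>> F.adaptive_avg_pool2d(torch.randn(1, 3, 64, 48), (5, 5)).shape
- #   torch.Size([1, 3, 5, 5])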
- # Activation functions
- def dropout(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
- r"""During training, randomly zeroes some elements of the input tensor with probability :attr:`p`.
- Uses samples from a Bernoulli distribution.
- See :class:`~torch.nn.Dropout` for details.
- Args:
- p: probability of an element to be zeroed. Default: 0.5
- training: apply dropout if ``True``. Default: ``True``
- inplace: If set to ``True``, will do this operation in-place. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(dropout, (input,), input, p=p, training=training, inplace=inplace)
- if p < 0.0 or p > 1.0:
- raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
- return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training)
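- # Editor's note (illustrative): surviving elements are rescaled by
- # 1 / (1 - p) during training so the expected activation is unchanged, and
- # ``training=False`` is the identity.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> x = torch.ones(10)
- #   >>> set(F.dropout(x, p=0.5).tolist()) <= {0.0, 2.0}   # kept values -> 2.0
- #   True
- #   >>> torch.equal(F.dropout(x, p=0.5, training=False), x)
- #   True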
- def alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor:
- r"""Apply alpha dropout to the input.
- See :class:`~torch.nn.AlphaDropout` for details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(alpha_dropout, (input,), input, p=p, training=training, inplace=inplace)
- if p < 0.0 or p > 1.0:
- raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
- return _VF.alpha_dropout_(input, p, training) if inplace else _VF.alpha_dropout(input, p, training)
- def dropout1d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
- r"""Randomly zero out entire channels (a channel is a 1D feature map).
- For example, the :math:`j`-th channel of the :math:`i`-th sample in the
- batched input is a 1D tensor :math:`\text{input}[i, j]` of the input tensor.
- Each channel will be zeroed out independently on every forward call with
- probability :attr:`p` using samples from a Bernoulli distribution.
- See :class:`~torch.nn.Dropout1d` for details.
- Args:
- p: probability of a channel to be zeroed. Default: 0.5
- training: apply dropout if ``True``. Default: ``True``
- inplace: If set to ``True``, will do this operation in-place. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(dropout1d, (input,), input, p=p, training=training, inplace=inplace)
- if p < 0.0 or p > 1.0:
- raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
- inp_dim = input.dim()
- if inp_dim not in (2, 3):
- raise RuntimeError(f"dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. "
- "Note that dropout1d exists to provide channel-wise dropout on inputs with 1 "
- "spatial dimension, a channel dimension, and an optional batch dimension "
- "(i.e. 2D or 3D inputs).")
- is_batched = inp_dim == 3
- if not is_batched:
- input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)
- result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)
- if not is_batched:
- result = result.squeeze_(0) if inplace else result.squeeze(0)
- return result
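- # Editor's sketch (illustrative): dropout1d zeroes whole channels rather
- # than individual elements, hence the unsqueeze/squeeze handling of
- # unbatched 2D inputs above.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> y = F.dropout1d(torch.ones(1, 4, 6), p=0.5)   # (N, C, L)
- #   >>> all(bool(row.eq(0).all() or row.eq(2.0).all()) for row in y[0])
- #   True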
- def dropout2d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
- r"""Randomly zero out entire channels (a channel is a 2D feature map).
- For example, the :math:`j`-th channel of the :math:`i`-th sample in the
- batched input is a 2D tensor :math:`\text{input}[i, j]` of the input tensor.
- Each channel will be zeroed out independently on every forward call with
- probability :attr:`p` using samples from a Bernoulli distribution.
- See :class:`~torch.nn.Dropout2d` for details.
- Args:
- p: probability of a channel to be zeroed. Default: 0.5
- training: apply dropout if ``True``. Default: ``True``
- inplace: If set to ``True``, will do this operation in-place. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(dropout2d, (input,), input, p=p, training=training, inplace=inplace)
- if p < 0.0 or p > 1.0:
- raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
- inp_dim = input.dim()
- if inp_dim not in (3, 4):
- warn_msg = (f"dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated "
- "and will result in an error in a future release. To retain the behavior "
- "and silence this warning, please use dropout instead. Note that dropout2d "
- "exists to provide channel-wise dropout on inputs with 2 spatial dimensions, "
- "a channel dimension, and an optional batch dimension (i.e. 3D or 4D inputs).")
- warnings.warn(warn_msg)
- # TODO: Properly support no-batch-dim inputs. For now, these are NOT supported; passing
- # a 3D input will perform dropout1d behavior instead. This was done historically and the
- # behavior is maintained here for now.
- # See https://github.com/pytorch/pytorch/issues/77081
- if inp_dim == 3:
- warnings.warn("dropout2d: Received a 3D input to dropout2d and assuming that channel-wise "
- "1D dropout behavior is desired - input is interpreted as shape (N, C, L), where C "
- "is the channel dim. This behavior will change in a future release to interpret the "
- "input as one without a batch dimension, i.e. shape (C, H, W). To maintain the 1D "
- "channel-wise dropout behavior, please switch to using dropout1d instead.")
- result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)
- return result
- def dropout3d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
- r"""Randomly zero out entire channels (a channel is a 3D feature map).
- For example, the :math:`j`-th channel of the :math:`i`-th sample in the
- batched input is a 3D tensor :math:`\text{input}[i, j]` of the input tensor.
- Each channel will be zeroed out independently on every forward call with
- probability :attr:`p` using samples from a Bernoulli distribution.
- See :class:`~torch.nn.Dropout3d` for details.
- Args:
- p: probability of a channel to be zeroed. Default: 0.5
- training: apply dropout if ``True``. Default: ``True``
- inplace: If set to ``True``, will do this operation in-place. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace)
- if p < 0.0 or p > 1.0:
- raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
- inp_dim = input.dim()
- if inp_dim not in (4, 5):
- warn_msg = (f"dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated "
- "and will result in an error in a future release. To retain the behavior "
- "and silence this warning, please use dropout instead. Note that dropout3d "
- "exists to provide channel-wise dropout on inputs with 3 spatial dimensions, "
- "a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).")
- warnings.warn(warn_msg)
- is_batched = inp_dim == 5
- if not is_batched:
- input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)
- result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)
- if not is_batched:
- result = result.squeeze_(0) if inplace else result.squeeze(0)
- return result
- def feature_alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor:
- r"""Randomly masks out entire channels (a channel is a feature map).
- For example, the :math:`j`-th channel of the :math:`i`-th sample in the batched input
- is a tensor :math:`\text{input}[i, j]` of the input tensor. Instead of
- setting activations to zero, as in regular Dropout, the activations are set
- to the negative saturation value of the SELU activation function.
- Each element will be masked independently on every forward call with
- probability :attr:`p` using samples from a Bernoulli distribution.
- The elements to be masked are randomized on every forward call, and scaled
- and shifted to maintain zero mean and unit variance.
- See :class:`~torch.nn.FeatureAlphaDropout` for details.
- Args:
- p: dropout probability of a channel to be zeroed. Default: 0.5
- training: apply dropout if ``True``. Default: ``False``
- inplace: If set to ``True``, will do this operation in-place. Default: ``False``
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- feature_alpha_dropout, (input,), input, p=p, training=training, inplace=inplace
- )
- if p < 0.0 or p > 1.0:
- raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
- return _VF.feature_alpha_dropout_(input, p, training) if inplace else _VF.feature_alpha_dropout(input, p, training)
- def _threshold(input: Tensor, threshold: float, value: float, inplace: bool = False) -> Tensor:
- r"""Apply a threshold to each element of the input Tensor.
- See :class:`~torch.nn.Threshold` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(_threshold, (input,), input, threshold, value, inplace=inplace)
- if inplace:
- result = _VF.threshold_(input, threshold, value)
- else:
- result = _VF.threshold(input, threshold, value)
- return result
- # We define this function as _threshold because it takes an argument
- # named threshold, which clobbers the recursive reference to the
- # function needed for __torch_function__ support
- threshold = _threshold
- threshold_ = _add_docstr(
- _VF.threshold_,
- r"""
- threshold_(input, threshold, value) -> Tensor
- In-place version of :func:`~threshold`.
- """,
- )
- def relu(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402
- r"""relu(input, inplace=False) -> Tensor
- Applies the rectified linear unit function element-wise. See
- :class:`~torch.nn.ReLU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(relu, (input,), input, inplace=inplace)
- if inplace:
- result = torch.relu_(input)
- else:
- result = torch.relu(input)
- return result
- relu_ = _add_docstr(
- torch.relu_,
- r"""
- relu_(input) -> Tensor
- In-place version of :func:`~relu`.
- """,
- )
- def glu(input: Tensor, dim: int = -1) -> Tensor: # noqa: D400,D402
- r"""
- glu(input, dim=-1) -> Tensor
- The gated linear unit. Computes:
- .. math ::
- \text{GLU}(a, b) = a \otimes \sigma(b)
- where `input` is split in half along `dim` to form `a` and `b`, :math:`\sigma`
- is the sigmoid function and :math:`\otimes` is the element-wise product between matrices.
- See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.
- Args:
- input (Tensor): input tensor
- dim (int): dimension on which to split the input. Default: -1
- """
- if has_torch_function_unary(input):
- return handle_torch_function(glu, (input,), input, dim=dim)
- if input.dim() == 0:
- raise RuntimeError("glu does not support scalars because halving size must be even")
- return torch._C._nn.glu(input, dim)
- def hardtanh(input: Tensor, min_val: float = -1., max_val: float = 1., inplace: bool = False) -> Tensor: # noqa: D400,D402
- r"""
- hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor
- Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more
- details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(hardtanh, (input,), input, min_val=min_val, max_val=max_val, inplace=inplace)
- if min_val > max_val:
- raise ValueError("min_val cannot be greater than max_val")
- if inplace:
- result = torch._C._nn.hardtanh_(input, min_val, max_val)
- else:
- result = torch._C._nn.hardtanh(input, min_val, max_val)
- return result
- hardtanh_ = _add_docstr(
- torch._C._nn.hardtanh_,
- r"""
- hardtanh_(input, min_val=-1., max_val=1.) -> Tensor
- In-place version of :func:`~hardtanh`.
- """,
- )
- def relu6(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402
- r"""relu6(input, inplace=False) -> Tensor
- Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`.
- See :class:`~torch.nn.ReLU6` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(relu6, (input,), input, inplace=inplace)
- if inplace:
- result = torch._C._nn.relu6_(input)
- else:
- result = torch._C._nn.relu6(input)
- return result
- def elu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor:
- r"""Apply the Exponential Linear Unit (ELU) function element-wise.
- See :class:`~torch.nn.ELU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(elu, (input,), input, alpha=alpha, inplace=inplace)
- if inplace:
- result = torch._C._nn.elu_(input, alpha)
- else:
- result = torch._C._nn.elu(input, alpha)
- return result
- elu_ = _add_docstr(
- torch._C._nn.elu_,
- r"""
- elu_(input, alpha=1.) -> Tensor
- In-place version of :func:`~elu`.
- """,
- )
- def selu(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402
- r"""selu(input, inplace=False) -> Tensor
- Applies element-wise,
- :math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`,
- with :math:`\alpha=1.6732632423543772848170429916717` and
- :math:`scale=1.0507009873554804934193349852946`.
- See :class:`~torch.nn.SELU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(selu, (input,), input, inplace=inplace)
- if inplace:
- result = torch.selu_(input)
- else:
- result = torch.selu(input)
- return result
- selu_ = _add_docstr(
- torch.selu_,
- r"""
- selu_(input) -> Tensor
- In-place version of :func:`~selu`.
- """,
- )
- def celu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor: # noqa: D400,D402
- r"""celu(input, alpha=1., inplace=False) -> Tensor
- Applies element-wise,
- :math:`\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))`.
- See :class:`~torch.nn.CELU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(celu, (input,), input, alpha=alpha, inplace=inplace)
- if inplace:
- result = torch.celu_(input, alpha)
- else:
- result = torch.celu(input, alpha)
- return result
- celu_ = _add_docstr(
- torch.celu_,
- r"""
- celu_(input, alpha=1.) -> Tensor
- In-place version of :func:`~celu`.
- """,
- )
- def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False) -> Tensor: # noqa: D400,D402
- r"""
- leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor
- Applies element-wise,
- :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
- See :class:`~torch.nn.LeakyReLU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(leaky_relu, (input,), input, negative_slope=negative_slope, inplace=inplace)
- if inplace:
- result = torch._C._nn.leaky_relu_(input, negative_slope)
- else:
- result = torch._C._nn.leaky_relu(input, negative_slope)
- return result
- leaky_relu_ = _add_docstr(
- torch._C._nn.leaky_relu_,
- r"""
- leaky_relu_(input, negative_slope=0.01) -> Tensor
- In-place version of :func:`~leaky_relu`.
- """,
- )
- prelu = _add_docstr(
- torch.prelu,
- r"""prelu(input, weight) -> Tensor
- Applies element-wise the function
- :math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a
- learnable parameter.
- .. note::
- `weight` is expected to be a scalar or 1-D tensor. If `weight` is 1-D,
- its size must match the number of input channels, determined by
- `input.size(1)` when `input.dim() >= 2`, otherwise 1.
- In the 1-D case, note that when `input` has dim > 2, `weight` can be expanded
- to the shape of `input` in a way that is not possible using normal
- :ref:`broadcasting semantics<broadcasting-semantics>`.
- See :class:`~torch.nn.PReLU` for more details.
- """)
- def rrelu(
- input: Tensor, lower: float = 1.0 / 8, upper: float = 1.0 / 3, training: bool = False, inplace: bool = False
- ) -> Tensor: # noqa: D400,D402
- r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor
- Randomized leaky ReLU.
- See :class:`~torch.nn.RReLU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- rrelu, (input,), input, lower=lower, upper=upper, training=training, inplace=inplace
- )
- if inplace:
- result = torch.rrelu_(input, lower, upper, training)
- else:
- result = torch.rrelu(input, lower, upper, training)
- return result
- rrelu_ = _add_docstr(
- torch.rrelu_,
- r"""
- rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor
- In-place version of :func:`~rrelu`.
- """,
- )
- logsigmoid = _add_docstr(
- torch._C._nn.log_sigmoid,
- r"""
- logsigmoid(input) -> Tensor
- Applies element-wise :math:`\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)`
- See :class:`~torch.nn.LogSigmoid` for more details.
- """,
- )
- gelu = _add_docstr(
- torch._C._nn.gelu,
- r"""
- gelu(input, approximate='none') -> Tensor
- When the approximate argument is 'none', it applies element-wise the function
- :math:`\text{GELU}(x) = x * \Phi(x)`
- where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
- When the approximate argument is 'tanh', GELU is estimated with
- .. math::
- \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
- See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
- """)
- hardshrink = _add_docstr(
- torch.hardshrink,
- r"""
- hardshrink(input, lambd=0.5) -> Tensor
- Applies the hard shrinkage function element-wise
- See :class:`~torch.nn.Hardshrink` for more details.
- """)
- def tanhshrink(input): # noqa: D400,D402
- r"""tanhshrink(input) -> Tensor
- Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`
- See :class:`~torch.nn.Tanhshrink` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(tanhshrink, (input,), input)
- return input - input.tanh()
- def softsign(input): # noqa: D400,D402
- r"""softsign(input) -> Tensor
- Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
- See :class:`~torch.nn.Softsign` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(softsign, (input,), input)
- return input / (input.abs() + 1)
- softplus = _add_docstr(
- torch._C._nn.softplus,
- r"""
- softplus(input, beta=1, threshold=20) -> Tensor
- Applies element-wise, the function :math:`\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))`.
- For numerical stability the implementation reverts to the linear function
- when :math:`input \times \beta > threshold`.
- See :class:`~torch.nn.Softplus` for more details.
- """,
- )
- def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int:
- warnings.warn(
- f"Implicit dimension choice for {name} has been deprecated. "
- "Change the call to include dim=X as an argument.",
- stacklevel=stacklevel,
- )
- if ndim == 0 or ndim == 1 or ndim == 3:
- ret = 0
- else:
- ret = 1
- return ret
- def softmin(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:
- r"""Apply a softmin function.
- Note that :math:`\text{Softmin}(x) = \text{Softmax}(-x)`. See softmax definition for mathematical formula.
- See :class:`~torch.nn.Softmin` for more details.
- Args:
- input (Tensor): input
- dim (int): A dimension along which softmin will be computed (so every slice
- along dim will sum to 1).
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(softmin, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
- if dim is None:
- dim = _get_softmax_dim("softmin", input.dim(), _stacklevel)
- if dtype is None:
- ret = (-input).softmax(dim)
- else:
- ret = (-input).softmax(dim, dtype=dtype)
- return ret
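- # Editor's note (illustrative): the identity above is the implementation;
- # softmin is softmax of the negated input.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> x = torch.randn(2, 5)
- #   >>> torch.allclose(F.softmin(x, dim=1), F.softmax(-x, dim=1))
- #   True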
- def softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:
- r"""Apply a softmax function.
- Softmax is defined as:
- :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
- It is applied to all slices along dim, and will re-scale them so that the elements
- lie in the range `[0, 1]` and sum to 1.
- See :class:`~torch.nn.Softmax` for more details.
- Args:
- input (Tensor): input
- dim (int): A dimension along which softmax will be computed.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- .. note::
- This function doesn't work directly with NLLLoss,
- which expects log-probabilities as input.
- Use log_softmax instead (it's faster and has better numerical properties).
- """
- if has_torch_function_unary(input):
- return handle_torch_function(softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
- if dim is None:
- dim = _get_softmax_dim("softmax", input.dim(), _stacklevel)
- if dtype is None:
- ret = input.softmax(dim)
- else:
- ret = input.softmax(dim, dtype=dtype)
- return ret
- def gumbel_softmax(logits: Tensor, tau: float = 1, hard: bool = False, eps: float = 1e-10, dim: int = -1) -> Tensor:
- r"""
- Sample from the Gumbel-Softmax distribution (`Link 1`_ `Link 2`_) and optionally discretize.
- Args:
- logits: `[..., num_features]` unnormalized log probabilities
- tau: non-negative scalar temperature
- hard: if ``True``, the returned samples will be discretized as one-hot vectors,
- but will be differentiated as if they were the soft samples in autograd
- dim (int): A dimension along which softmax will be computed. Default: -1.
- Returns:
- Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
- If ``hard=True``, the returned samples will be one-hot, otherwise they will
- be probability distributions that sum to 1 across `dim`.
- .. note::
- This function is here for legacy reasons and may be removed from nn.functional in the future.
- .. note::
- The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft`
- It achieves two things:
- - makes the output value exactly one-hot
- (since we add then subtract y_soft value)
- - makes the gradient equal to y_soft gradient
- (since we strip all other gradients)
- Examples::
- >>> logits = torch.randn(20, 32)
- >>> # Sample soft categorical using reparametrization trick:
- >>> F.gumbel_softmax(logits, tau=1, hard=False)
- >>> # Sample hard categorical using "Straight-through" trick:
- >>> F.gumbel_softmax(logits, tau=1, hard=True)
- .. _Link 1:
- https://arxiv.org/abs/1611.00712
- .. _Link 2:
- https://arxiv.org/abs/1611.01144
- """
- if has_torch_function_unary(logits):
- return handle_torch_function(gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim)
- if eps != 1e-10:
- warnings.warn("`eps` parameter is deprecated and has no effect.")
- gumbels = (
- -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()
- ) # ~Gumbel(0,1)
- gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
- y_soft = gumbels.softmax(dim)
- if hard:
- # Straight through.
- index = y_soft.max(dim, keepdim=True)[1]
- y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
- ret = y_hard - y_soft.detach() + y_soft
- else:
- # Reparametrization trick.
- ret = y_soft
- return ret
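- # Editor's sketch of the straight-through trick noted above (illustrative):
- # the forward value matches the one-hot ``y_hard`` while the backward pass
- # differentiates only the ``y_soft`` term.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> logits = torch.randn(4, 8, requires_grad=True)
- #   >>> y = F.gumbel_softmax(logits, tau=1.0, hard=True)
- #   >>> (y * torch.arange(8.0)).sum().backward()
- #   >>> logits.grad is not None           # gradients flow via the soft path
- #   True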
- def log_softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:
- r"""Apply a softmax followed by a logarithm.
- While mathematically equivalent to log(softmax(x)), doing these two
- operations separately is slower and numerically unstable. This function
- uses an alternative formulation to compute the output and gradient correctly.
- See :class:`~torch.nn.LogSoftmax` for more details.
- Args:
- input (Tensor): input
- dim (int): A dimension along which log_softmax will be computed.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(log_softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
- if dim is None:
- dim = _get_softmax_dim("log_softmax", input.dim(), _stacklevel)
- if dtype is None:
- ret = input.log_softmax(dim)
- else:
- ret = input.log_softmax(dim, dtype=dtype)
- return ret
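- # Editor's sketch of the stability claim above (illustrative): composing the
- # two ops underflows for very negative logits; the fused op does not.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> x = torch.tensor([0.0, -1000.0])
- #   >>> torch.log(F.softmax(x, dim=0))    # softmax underflows to 0, log -> -inf
- #   tensor([0., -inf])
- #   >>> F.log_softmax(x, dim=0)
- #   tensor([    0., -1000.])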
- softshrink = _add_docstr(
- torch._C._nn.softshrink,
- r"""
- softshrink(input, lambd=0.5) -> Tensor
- Applies the soft shrinkage function elementwise
- See :class:`~torch.nn.Softshrink` for more details.
- """,
- )
- def tanh(input): # noqa: D400,D402
- r"""tanh(input) -> Tensor
- Applies element-wise,
- :math:`\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}`
- See :class:`~torch.nn.Tanh` for more details.
- """
- return input.tanh()
- def sigmoid(input): # noqa: D400,D402
- r"""sigmoid(input) -> Tensor
- Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}`
- See :class:`~torch.nn.Sigmoid` for more details.
- """
- return input.sigmoid()
- def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
- r"""Apply the Hardsigmoid function element-wise.
- .. math::
- \text{Hardsigmoid}(x) = \begin{cases}
- 0 & \text{if~} x \le -3, \\
- 1 & \text{if~} x \ge +3, \\
- x / 6 + 1 / 2 & \text{otherwise}
- \end{cases}
- Args:
- inplace: If set to ``True``, will do this operation in-place. Default: ``False``
- See :class:`~torch.nn.Hardsigmoid` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(hardsigmoid, (input,), input, inplace=inplace)
- if inplace:
- return torch._C._nn.hardsigmoid_(input)
- return torch._C._nn.hardsigmoid(input)
- linear = _add_docstr(
- torch._C._nn.linear,
- r"""
- linear(input, weight, bias=None) -> Tensor
- Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
- This operation supports 2-D :attr:`weight` with :ref:`sparse layout<sparse-docs>`
- {sparse_beta_warning}
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
- Shape:
- - Input: :math:`(*, in\_features)` where `*` means any number of
- additional dimensions, including none
- - Weight: :math:`(out\_features, in\_features)` or :math:`(in\_features)`
- - Bias: :math:`(out\_features)` or :math:`()`
- - Output: :math:`(*, out\_features)` or :math:`(*)`, based on the shape of the weight
- """.format(**sparse_support_notes))
- bilinear = _add_docstr(
- torch.bilinear,
- r"""
- bilinear(input1, input2, weight, bias=None) -> Tensor
- Applies a bilinear transformation to the incoming data:
- :math:`y = x_1^T A x_2 + b`
- Shape:
- - input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}`
- and :math:`*` means any number of additional dimensions.
- All but the last dimension of the inputs should be the same.
- - input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`
- - weight: :math:`(\text{out\_features}, \text{in1\_features},
- \text{in2\_features})`
- - bias: :math:`(\text{out\_features})`
- - output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}`
- and all but the last dimension are the same shape as the input.
- """)
- def silu(input: Tensor, inplace: bool = False) -> Tensor:
- r"""Apply the Sigmoid Linear Unit (SiLU) function, element-wise.
- The SiLU function is also known as the swish function.
- .. math::
- \text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
- .. note::
- See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
- where the SiLU (Sigmoid Linear Unit) was originally coined, and see
- `Sigmoid-Weighted Linear Units for Neural Network Function Approximation
- in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
- a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
- where the SiLU was experimented with later.
- See :class:`~torch.nn.SiLU` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(silu, (input,), input, inplace=inplace)
- if inplace:
- return torch._C._nn.silu_(input)
- return torch._C._nn.silu(input)
- def mish(input: Tensor, inplace: bool = False) -> Tensor:
- r"""Apply the Mish function, element-wise.
- Mish: A Self Regularized Non-Monotonic Neural Activation Function.
- .. math::
- \text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
- .. note::
- See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
- See :class:`~torch.nn.Mish` for more details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(mish, (input,), input, inplace=inplace)
- if inplace:
- return torch._C._nn.mish_(input)
- return torch._C._nn.mish(input)
- def hardswish(input: Tensor, inplace: bool = False) -> Tensor:
- r"""Apply hardswish function, element-wise.
- Follows the implementation described in the paper:
- `Searching for MobileNetV3`_.
- .. math::
- \text{Hardswish}(x) = \begin{cases}
- 0 & \text{if~} x \le -3, \\
- x & \text{if~} x \ge +3, \\
- x \cdot (x + 3) / 6 & \text{otherwise}
- \end{cases}
- See :class:`~torch.nn.Hardswish` for more details.
- .. _`Searching for MobileNetV3`:
- https://arxiv.org/abs/1905.02244
- """
- if has_torch_function_unary(input):
- return handle_torch_function(hardswish, (input,), input, inplace=inplace)
- if inplace:
- return torch._C._nn.hardswish_(input)
- return torch._C._nn.hardswish(input)
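- # Editor's check of the piecewise definition above (illustrative):
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> F.hardswish(torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0]))
- #   tensor([-0., -0., 0., 3., 4.])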
- def _no_grad_embedding_renorm_(weight: Tensor, input: Tensor, max_norm: float, norm_type: float) -> None:
- torch.embedding_renorm_(weight.detach(), input, max_norm, norm_type)
- def embedding(
- input: Tensor,
- weight: Tensor,
- padding_idx: Optional[int] = None,
- max_norm: Optional[float] = None,
- norm_type: float = 2.0,
- scale_grad_by_freq: bool = False,
- sparse: bool = False,
- ) -> Tensor:
- r"""Generate a simple lookup table that looks up embeddings in a fixed dictionary and size.
- This module is often used to retrieve word embeddings using indices.
- The input to the module is a list of indices, and the embedding matrix,
- and the output is the corresponding word embeddings.
- See :class:`torch.nn.Embedding` for more details.
- .. note::
- Note that the analytical gradients of this function with respect to
- entries in :attr:`weight` at the row specified by :attr:`padding_idx`
- are expected to differ from the numerical ones.
- .. note::
- Note that :class:`torch.nn.Embedding` differs from this function in
- that it initializes the row of :attr:`weight` specified by
- :attr:`padding_idx` to all zeros on construction.
- Args:
- input (LongTensor): Tensor containing indices into the embedding matrix
- weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
- and number of columns equal to the embedding size
- padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
- therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
- i.e. it remains as a fixed "pad".
- max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
- is renormalized to have norm :attr:`max_norm`.
- Note: this will modify :attr:`weight` in-place.
- norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
- scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
- the words in the mini-batch. Default ``False``.
- sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under
- :class:`torch.nn.Embedding` for more details regarding sparse gradients.
- Shape:
- - Input: LongTensor of arbitrary shape containing the indices to extract
- - Weight: Embedding matrix of floating point type with shape `(V, embedding_dim)`,
- where V = maximum index + 1 and embedding_dim = the embedding size
- - Output: `(*, embedding_dim)`, where `*` is the input shape
- Examples::
- >>> # a batch of 2 samples of 4 indices each
- >>> input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
- >>> # an embedding matrix containing 10 tensors of size 3
- >>> embedding_matrix = torch.rand(10, 3)
- >>> # xdoctest: +IGNORE_WANT("non-deterministic")
- >>> F.embedding(input, embedding_matrix)
- tensor([[[ 0.8490, 0.9625, 0.6753],
- [ 0.9666, 0.7761, 0.6108],
- [ 0.6246, 0.9751, 0.3618],
- [ 0.4161, 0.2419, 0.7383]],
- [[ 0.6246, 0.9751, 0.3618],
- [ 0.0237, 0.7794, 0.0528],
- [ 0.9666, 0.7761, 0.6108],
- [ 0.3385, 0.8612, 0.1867]]])
- >>> # example with padding_idx
- >>> weights = torch.rand(10, 3)
- >>> weights[0, :].zero_()
- >>> embedding_matrix = weights
- >>> input = torch.tensor([[0, 2, 0, 5]])
- >>> F.embedding(input, embedding_matrix, padding_idx=0)
- tensor([[[ 0.0000, 0.0000, 0.0000],
- [ 0.5609, 0.5384, 0.8720],
- [ 0.0000, 0.0000, 0.0000],
- [ 0.6262, 0.2438, 0.7471]]])
- """
- if has_torch_function_variadic(input, weight):
- return handle_torch_function(
- embedding,
- (input, weight),
- input,
- weight,
- padding_idx=padding_idx,
- max_norm=max_norm,
- norm_type=norm_type,
- scale_grad_by_freq=scale_grad_by_freq,
- sparse=sparse,
- )
- if padding_idx is not None:
- if padding_idx > 0:
- assert padding_idx < weight.size(0), "Padding_idx must be within num_embeddings"
- elif padding_idx < 0:
- assert padding_idx >= -weight.size(0), "Padding_idx must be within num_embeddings"
- padding_idx = weight.size(0) + padding_idx
- else:
- padding_idx = -1
- if max_norm is not None:
- # Note [embedding_renorm contiguous]
- # `embedding_renorm_` will call .contiguous() on input anyways, so we
- # call it here and take advantage of the improved locality in the
- # `embedding` call below too.
- input = input.contiguous()
- # Note [embedding_renorm set_grad_enabled]
- # XXX: equivalent to
- # with torch.no_grad():
- # torch.embedding_renorm_
- # remove once script supports set_grad_enabled
- _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
- return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
- def embedding_bag(
- input: Tensor,
- weight: Tensor,
- offsets: Optional[Tensor] = None,
- max_norm: Optional[float] = None,
- norm_type: float = 2,
- scale_grad_by_freq: bool = False,
- mode: str = "mean",
- sparse: bool = False,
- per_sample_weights: Optional[Tensor] = None,
- include_last_offset: bool = False,
- padding_idx: Optional[int] = None,
- ) -> Tensor:
- r"""Compute sums, means or maxes of `bags` of embeddings.
- Calculation is done without instantiating the intermediate embeddings.
- See :class:`torch.nn.EmbeddingBag` for more details.
- Note:
- {backward_reproducibility_note}
- Args:
- input (LongTensor): Tensor containing bags of indices into the embedding matrix
- weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
- and number of columns equal to the embedding size
- offsets (LongTensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
- the starting index position of each bag (sequence) in :attr:`input`.
- max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
- is renormalized to have norm :attr:`max_norm`.
- Note: this will modify :attr:`weight` in-place.
- norm_type (float, optional): The ``p`` in the ``p``-norm to compute for the :attr:`max_norm` option.
- Default ``2``.
- scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
- the words in the mini-batch. Default ``False``.
- Note: this option is not supported when ``mode="max"``.
- mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
- Default: ``"mean"``
- sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under
- :class:`torch.nn.Embedding` for more details regarding sparse gradients.
- Note: this option is not supported when ``mode="max"``.
- per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
- to indicate all weights should be taken to be 1. If specified, :attr:`per_sample_weights`
- must have exactly the same shape as input and is treated as having the same
- :attr:`offsets`, if those are not None.
- include_last_offset (bool, optional): if ``True``, the size of offsets is equal to the number of bags + 1.
- The last element is the size of the input, or the ending index position of the last bag (sequence).
- padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
- gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
- during training, i.e. it remains as a fixed "pad". Note that the embedding
- vector at :attr:`padding_idx` is excluded from the reduction.
- Shape:
- - :attr:`input` (LongTensor) and :attr:`offsets` (LongTensor, optional)
- - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
- each of fixed length ``N``, and this will return ``B`` values aggregated in a way
- depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.
- - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
- multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing
- the starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets`
- of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags.
- Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.
- - :attr:`weight` (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
- - :attr:`per_sample_weights` (Tensor, optional). Has the same shape as :attr:`input`.
- - :attr:`output`: aggregated embedding values of shape `(B, embedding_dim)`
- Examples::
- >>> # an Embedding module containing 10 tensors of size 3
- >>> embedding_matrix = torch.rand(10, 3)
- >>> # a batch of 2 samples of 4 indices each
- >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
- >>> offsets = torch.tensor([0, 4])
- >>> # xdoctest: +IGNORE_WANT("non-deterministic")
- >>> F.embedding_bag(input, embedding_matrix, offsets)
- tensor([[ 0.3397, 0.3552, 0.5545],
- [ 0.5893, 0.4386, 0.5882]])
- >>> # example with padding_idx
- >>> embedding_matrix = torch.rand(10, 3)
- >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9])
- >>> offsets = torch.tensor([0, 4])
- >>> F.embedding_bag(input, embedding_matrix, offsets, padding_idx=2, mode='sum')
- tensor([[ 0.0000, 0.0000, 0.0000],
- [-0.7082, 3.2145, -2.6251]])
- """
- if has_torch_function_variadic(input, weight, offsets, per_sample_weights):
- return handle_torch_function(
- embedding_bag,
- (input, weight, offsets, per_sample_weights),
- input,
- weight,
- offsets=offsets,
- max_norm=max_norm,
- norm_type=norm_type,
- scale_grad_by_freq=scale_grad_by_freq,
- mode=mode,
- sparse=sparse,
- per_sample_weights=per_sample_weights,
- include_last_offset=include_last_offset,
- padding_idx=padding_idx,
- )
- # Check for backward compatibility.
- # Used to be embedding_bag(weight, input, ...)
- # Now is embedding_bag(input, weight, ...)
- if weight.dtype == torch.long and input.is_floating_point():
- warnings.warn(
- "Argument order of nn.functional.embedding_bag was changed. "
- "Usage `embedding_bag(weight, input, ...)` is deprecated, "
- "and should now be `embedding_bag(input, weight, ...)`."
- )
- weight, input = input, weight
- if per_sample_weights is not None and input.size() != per_sample_weights.size():
- raise ValueError(
- f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, "
- f"then it must have the same shape as the input ({input.shape})"
- )
- if weight.dim() != 2:
- raise ValueError(
- f"weight has to be a 2D Tensor, but got Tensor of dimension {weight.dim()}"
- )
- if input.dim() == 2:
- if offsets is not None:
- type_str = "<unknown>"
- # TODO: Remove this once script supports type() calls
- if not torch.jit.is_scripting():
- type_str = str(type(offsets))
- raise ValueError(
- "if input is 2D, then offsets has to be None"
- ", as input is treated is a mini-batch of"
- " fixed length sequences. However, found "
- f"offsets of type {type_str}"
- )
- offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device)
- input = input.reshape(-1)
- if per_sample_weights is not None:
- per_sample_weights = per_sample_weights.reshape(-1)
- elif input.dim() == 1:
- if offsets is None:
- raise ValueError("offsets has to be a 1D Tensor but got None")
- if offsets.dim() != 1:
- raise ValueError("offsets has to be a 1D Tensor")
- else:
- raise ValueError(f"input has to be 1D or 2D Tensor, but got Tensor of dimension {input.dim()}")
- if mode == "sum":
- mode_enum = 0
- elif mode == "mean":
- mode_enum = 1
- elif mode == "max":
- mode_enum = 2
- if scale_grad_by_freq:
- raise ValueError("max mode does not support scaling the gradient by the frequency")
- if sparse:
- raise ValueError("max mode does not support sparse weights")
- else:
- raise ValueError("mode has to be one of sum, mean or max")
- if max_norm is not None:
- # XXX: equivalent to
- # with torch.no_grad():
- # torch.embedding_renorm_
- # remove once script supports set_grad_enabled
- _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
- if per_sample_weights is not None and mode != "sum":
- raise NotImplementedError(
- "embedding_bag: per_sample_weights was not None. "
- "per_sample_weights is only supported for mode='sum' "
- f"(got mode='{mode}'). Please open a feature request on GitHub."
- )
- ret, _, _, _ = torch.embedding_bag(
- weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights, include_last_offset, padding_idx
- )
- return ret
- if embedding_bag.__doc__:
- embedding_bag.__doc__ = embedding_bag.__doc__.format(**reproducibility_notes)
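- # Editor's sketch of the 2D fast path above (illustrative): a fixed-length
- # (B, N) batch is flattened and paired with synthetic offsets
- # [0, N, 2N, ...], so it agrees with the explicit 1D-plus-offsets form.
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> weight = torch.rand(10, 3)
- #   >>> batch = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
- #   >>> flat, offsets = batch.reshape(-1), torch.tensor([0, 4])
- #   >>> torch.allclose(F.embedding_bag(batch, weight),
- #   ...                F.embedding_bag(flat, weight, offsets))
- #   True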
- def _verify_batch_size(size: List[int]) -> None:
- # XXX: JIT script does not support the reduce from functools, and mul op is a
- # builtin, which cannot be used as a value to a func yet, so rewrite this size
- # check to a simple equivalent for loop
- #
- # TODO: make use of reduce like below when JIT is ready with the missing features:
- # from operator import mul
- # from functools import reduce
- #
- # if reduce(mul, size[2:], size[0]) == 1
- size_prods = size[0]
- for i in range(len(size) - 2):
- size_prods *= size[i + 2]
- if size_prods == 1:
- raise ValueError(f"Expected more than 1 value per channel when training, got input size {size}")
- def batch_norm(
- input: Tensor,
- running_mean: Optional[Tensor],
- running_var: Optional[Tensor],
- weight: Optional[Tensor] = None,
- bias: Optional[Tensor] = None,
- training: bool = False,
- momentum: float = 0.1,
- eps: float = 1e-5,
- ) -> Tensor:
- r"""Apply Batch Normalization for each channel across a batch of data.
- See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`,
- :class:`~torch.nn.BatchNorm3d` for details.
- """
- if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
- return handle_torch_function(
- batch_norm,
- (input, running_mean, running_var, weight, bias),
- input,
- running_mean,
- running_var,
- weight=weight,
- bias=bias,
- training=training,
- momentum=momentum,
- eps=eps,
- )
- if training:
- _verify_batch_size(input.size())
- return torch.batch_norm(
- input, weight, bias, running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled
- )
- def _verify_spatial_size(size: List[int]) -> None:
- # Verify that there is > 1 spatial element for instance norm calculation.
- size_prods = 1
- for i in range(2, len(size)):
- size_prods *= size[i]
- if size_prods == 1:
- raise ValueError(f"Expected more than 1 spatial element when training, got input size {size}")
- def instance_norm(
- input: Tensor,
- running_mean: Optional[Tensor] = None,
- running_var: Optional[Tensor] = None,
- weight: Optional[Tensor] = None,
- bias: Optional[Tensor] = None,
- use_input_stats: bool = True,
- momentum: float = 0.1,
- eps: float = 1e-5,
- ) -> Tensor:
- r"""Apply Instance Normalization independently for each channel in every data sample within a batch.
- See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`,
- :class:`~torch.nn.InstanceNorm3d` for details.
- """
- if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
- return handle_torch_function(
- instance_norm,
- (input, running_mean, running_var, weight, bias),
- input,
- running_mean=running_mean,
- running_var=running_var,
- weight=weight,
- bias=bias,
- use_input_stats=use_input_stats,
- momentum=momentum,
- eps=eps,
- )
- if use_input_stats:
- _verify_spatial_size(input.size())
- return torch.instance_norm(
- input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch.backends.cudnn.enabled
- )
- def layer_norm(
- input: Tensor,
- normalized_shape: List[int],
- weight: Optional[Tensor] = None,
- bias: Optional[Tensor] = None,
- eps: float = 1e-5,
- ) -> Tensor:
- r"""Apply Layer Normalization for last certain number of dimensions.
- See :class:`~torch.nn.LayerNorm` for details.
- """
- if has_torch_function_variadic(input, weight, bias):
- return handle_torch_function(
- layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps
- )
- return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
- def rms_norm(
- input: Tensor,
- normalized_shape: List[int],
- weight: Optional[Tensor] = None,
- eps: Optional[float] = None,
- ) -> Tensor:
- r"""Apply Root Mean Square Layer Normalization.
- See :class:`~torch.nn.RMSNorm` for details.
- """
- if has_torch_function_variadic(input, weight):
- return handle_torch_function(
- rms_norm, (input, weight), input, normalized_shape, weight=weight, eps=eps
- )
- return torch.rms_norm(input, normalized_shape, weight, eps)
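- # Editor's reference sketch (illustrative; assumes the plain RMSNorm formula
- # x * rsqrt(mean(x ** 2) + eps) over the normalized dims, with no weight):
- #
- #   >>> import torch
- #   >>> import torch.nn.functional as F
- #   >>> x, eps = torch.randn(2, 4), 1e-6
- #   >>> manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
- #   >>> torch.allclose(F.rms_norm(x, [4], eps=eps), manual)
- #   True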
- def group_norm(
- input: Tensor, num_groups: int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: float = 1e-5
- ) -> Tensor:
- r"""Apply Group Normalization for last certain number of dimensions.
- See :class:`~torch.nn.GroupNorm` for details.
- """
- if has_torch_function_variadic(input, weight, bias):
- return handle_torch_function(group_norm, (input, weight, bias,), input, num_groups, weight=weight, bias=bias, eps=eps)
- if input.dim() < 2:
- raise RuntimeError(f"Expected at least 2 dimensions for input tensor but received {input.dim()}")
- _verify_batch_size([input.size(0) * input.size(1) // num_groups, num_groups] + list(input.size()[2:]))
- return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
- def local_response_norm(input: Tensor, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.0) -> Tensor:
- r"""Apply local response normalization over an input signal.
- The input signal is composed of several input planes, where channels occupy the second dimension.
- Normalization is applied across channels.
- See :class:`~torch.nn.LocalResponseNorm` for details.
- """
- if has_torch_function_unary(input):
- return handle_torch_function(local_response_norm, (input,), input, size, alpha=alpha, beta=beta, k=k)
- dim = input.dim()
- if dim < 3:
- raise ValueError(
- f"Expected 3D or higher dimensionality input (got {dim} dimensions)"
- )
- if input.numel() == 0:
- return input
- div = input.mul(input)
- if dim == 3:
- div = div.unsqueeze(1)
- div = pad(div, (0, 0, size // 2, (size - 1) // 2))
- div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)
- else:
- sizes = input.size()
- div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
- div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))
- div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1)
- div = div.view(sizes)
- div = div.mul(alpha).add(k).pow(beta)
- return input / div
- # loss
- def ctc_loss(
- log_probs: Tensor,
- targets: Tensor,
- input_lengths: Tensor,
- target_lengths: Tensor,
- blank: int = 0,
- reduction: str = "mean",
- zero_infinity: bool = False,
- ) -> Tensor:
- r"""Apply the Connectionist Temporal Classification loss.
- See :class:`~torch.nn.CTCLoss` for details.
- Note:
- {cudnn_reproducibility_note}
- Note:
- {backward_reproducibility_note}
- Args:
- log_probs: :math:`(T, N, C)` or :math:`(T, C)` where `C = number of characters in alphabet including blank`,
- `T = input length`, and `N = batch size`.
- The logarithmized probabilities of the outputs
- (e.g. obtained with :func:`torch.nn.functional.log_softmax`).
- targets: :math:`(N, S)` or :math:`(\operatorname{sum}(\text{target\_lengths}))`.
- Targets cannot be blank. In the second form, the targets are assumed to be concatenated.
- input_lengths: :math:`(N)` or :math:`()`.
- Lengths of the inputs (must each be :math:`\leq T`)
- target_lengths: :math:`(N)` or :math:`()`.
- Lengths of the targets
- blank (int, optional):
- Blank label. Default :math:`0`.
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the output losses will be divided by the target lengths and
- then the mean over the batch is taken, ``'sum'``: the output will be
- summed. Default: ``'mean'``
- zero_infinity (bool, optional):
- Whether to zero infinite losses and the associated gradients.
- Default: ``False``
- Infinite losses mainly occur when the inputs are too short
- to be aligned to the targets.
- Example::
- >>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()
- >>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
- >>> input_lengths = torch.full((16,), 50, dtype=torch.long)
- >>> target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)
- >>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
- >>> loss.backward()
- """
- if has_torch_function_variadic(log_probs, targets, input_lengths, target_lengths):
- return handle_torch_function(
- ctc_loss,
- (log_probs, targets, input_lengths, target_lengths),
- log_probs, targets, input_lengths, target_lengths,
- blank=blank, reduction=reduction, zero_infinity=zero_infinity
- )
- return torch.ctc_loss(
- log_probs, targets, input_lengths, target_lengths, blank, _Reduction.get_enum(reduction), zero_infinity
- )
- if ctc_loss.__doc__:
- ctc_loss.__doc__ = ctc_loss.__doc__.format(**reproducibility_notes)
- def nll_loss(
- input: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- size_average: Optional[bool] = None,
- ignore_index: int = -100,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor:
- r"""Compute the negative log likelihood loss.
- See :class:`~torch.nn.NLLLoss` for details.
- Args:
- input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
- in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1`
- in the case of K-dimensional loss. `input` is expected to be log-probabilities.
- target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`,
- or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \geq 1` for
- K-dimensional loss.
- weight (Tensor, optional): a manual rescaling weight given to each
- class. If given, has to be a Tensor of size `C`
- size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
- the losses are averaged over each loss element in the batch. Note that for
- some losses, there are multiple elements per sample. If the field :attr:`size_average`
- is set to ``False``, the losses are instead summed for each minibatch. Ignored
- when reduce is ``False``. Default: ``True``
- ignore_index (int, optional): Specifies a target value that is ignored
- and does not contribute to the input gradient. When :attr:`size_average` is
- ``True``, the loss is averaged over non-ignored targets. Default: -100
- reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
- losses are averaged or summed over observations for each minibatch depending
- on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
- batch element instead and ignores :attr:`size_average`. Default: ``True``
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the sum of the output will be divided by the number of
- elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
- and :attr:`reduce` are in the process of being deprecated, and in the meantime,
- specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
- Example::
- >>> # input is of size N x C = 3 x 5
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> # each element in target has to have 0 <= value < C
- >>> target = torch.tensor([1, 0, 4])
- >>> output = F.nll_loss(F.log_softmax(input, dim=1), target)
- >>> output.backward()
- """
- if has_torch_function_variadic(input, target, weight):
- return handle_torch_function(
- nll_loss,
- (input, target, weight),
- input,
- target,
- weight=weight,
- size_average=size_average,
- ignore_index=ignore_index,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
- def poisson_nll_loss(
- input: Tensor,
- target: Tensor,
- log_input: bool = True,
- full: bool = False,
- size_average: Optional[bool] = None,
- eps: float = 1e-8,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor:
- r"""Poisson negative log likelihood loss.
- See :class:`~torch.nn.PoissonNLLLoss` for details.
- Args:
- input: expectation of underlying Poisson distribution.
- target: random sample :math:`target \sim \text{Poisson}(input)`.
- log_input: if ``True`` the loss is computed as
- :math:`\exp(\text{input}) - \text{target} * \text{input}`, if ``False`` then loss is
- :math:`\text{input} - \text{target} * \log(\text{input}+\text{eps})`. Default: ``True``
- full: whether to compute the full loss, i.e., to add the Stirling
- approximation term :math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`.
- Default: ``False``
- size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
- the losses are averaged over each loss element in the batch. Note that for
- some losses, there are multiple elements per sample. If the field :attr:`size_average`
- is set to ``False``, the losses are instead summed for each minibatch. Ignored
- when reduce is ``False``. Default: ``True``
- eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when
- :attr:`log_input`\ =\ ``False``. Default: 1e-8
- reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
- losses are averaged or summed over observations for each minibatch depending
- on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
- batch element instead and ignores :attr:`size_average`. Default: ``True``
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the sum of the output will be divided by the number of
- elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
- and :attr:`reduce` are in the process of being deprecated, and in the meantime,
- specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
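- Example (illustrative; with the default ``log_input=True`` the input is interpreted as a log-rate)::
- >>> input = torch.randn(5, 2, requires_grad=True)
- >>> target = torch.poisson(torch.rand(5, 2) * 5)
- >>> loss = F.poisson_nll_loss(input, target)
- >>> loss.backward()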
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- poisson_nll_loss,
- (input, target),
- input,
- target,
- log_input=log_input,
- full=full,
- size_average=size_average,
- eps=eps,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- if reduction != "none" and reduction != "mean" and reduction != "sum":
- ret = input
- raise ValueError(reduction + " is not a valid value for reduction")
- ret = torch.poisson_nll_loss(input, target, log_input, full, eps, _Reduction.get_enum(reduction))
- return ret
- def gaussian_nll_loss(
- input: Tensor,
- target: Tensor,
- var: Tensor,
- full: bool = False,
- eps: float = 1e-6,
- reduction: str = "mean",
- ) -> Tensor:
- r"""Gaussian negative log likelihood loss.
- See :class:`~torch.nn.GaussianNLLLoss` for details.
- Args:
- input: expectation of the Gaussian distribution.
- target: sample from the Gaussian distribution.
- var: tensor of positive variance(s), one for each of the expectations
- in the input (heteroscedastic), or a single one (homoscedastic).
- full (bool, optional): include the constant term in the loss calculation. Default: ``False``.
- eps (float, optional): value added to var, for stability. Default: 1e-6.
- reduction (str, optional): specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the output is the average of all batch member losses,
- ``'sum'``: the output is the sum of all batch member losses.
- Default: ``'mean'``.
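- Example (illustrative; one variance per prediction, i.e. the heteroscedastic case)::
- >>> input = torch.randn(5, 2, requires_grad=True)
- >>> target = torch.randn(5, 2)
- >>> var = torch.ones(5, 2, requires_grad=True)
- >>> loss = F.gaussian_nll_loss(input, target, var)
- >>> loss.backward()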
- """
- if has_torch_function_variadic(input, target, var):
- return handle_torch_function(
- gaussian_nll_loss,
- (input, target, var),
- input,
- target,
- var,
- full=full,
- eps=eps,
- reduction=reduction,
- )
- # Check var size
- # If var.size == input.size, the case is heteroscedastic and no further checks are needed.
- # Otherwise:
- if var.size() != input.size():
- # If var is one dimension short of input, but the sizes match otherwise, then this is a homoscedastic case.
- # e.g. input.size = (10, 2, 3), var.size = (10, 2)
- # -> unsqueeze var so that var.shape = (10, 2, 1)
- # this is done so that broadcasting can happen in the loss calculation
- if input.size()[:-1] == var.size():
- var = torch.unsqueeze(var, -1)
- # This checks if the sizes match up to the final dimension, and the final dimension of var is of size 1.
- # This is also a homoscedastic case.
- # e.g. input.size = (10, 2, 3), var.size = (10, 2, 1)
- elif input.size()[:-1] == var.size()[:-1] and var.size(-1) == 1: # Also a homoscedastic case
- pass
- # If none of the above pass, then the size of var is incorrect.
- else:
- raise ValueError("var is of incorrect size")
- # Check validity of reduction mode
- if reduction != 'none' and reduction != 'mean' and reduction != 'sum':
- raise ValueError(reduction + " is not a valid value for reduction")
- # Entries of var must be non-negative
- if torch.any(var < 0):
- raise ValueError("var has negative entry/entries")
- # Clamp for stability
- var = var.clone()
- with torch.no_grad():
- var.clamp_(min=eps)
- # Calculate the loss
- loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
- if full:
- loss += 0.5 * math.log(2 * math.pi)
- if reduction == 'mean':
- return loss.mean()
- elif reduction == 'sum':
- return loss.sum()
- else:
- return loss
- def kl_div(
- input: Tensor,
- target: Tensor,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- log_target: bool = False,
- ) -> Tensor:
- r"""Compute the KL Divergence loss.
- See the `Kullback-Leibler divergence
- <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`__ article for background.
- See :class:`~torch.nn.KLDivLoss` for details.
- Args:
- input: Tensor of arbitrary shape in log-probabilities.
- target: Tensor of the same shape as input. See :attr:`log_target` for
- the target's interpretation.
- size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
- the losses are averaged over each loss element in the batch. Note that for
- some losses, there are multiple elements per sample. If the field :attr:`size_average`
- is set to ``False``, the losses are instead summed for each minibatch. Ignored
- when reduce is ``False``. Default: ``True``
- reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
- losses are averaged or summed over observations for each minibatch depending
- on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
- batch element instead and ignores :attr:`size_average`. Default: ``True``
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.
- ``'none'``: no reduction will be applied
- ``'batchmean'``: the sum of the output will be divided by the batch size
- ``'sum'``: the output will be summed
- ``'mean'``: the output will be divided by the number of elements in the output
- Default: ``'mean'``
- log_target (bool): A flag indicating whether ``target`` is passed in the log space.
- It is recommended to pass certain distributions (like those produced by ``softmax``)
- in the log space to avoid numerical issues caused by explicit ``log``.
- Default: ``False``
- .. note::
- :attr:`size_average` and :attr:`reduce` are in the process of being deprecated,
- and in the meantime, specifying either of those two args will override :attr:`reduction`.
- .. warning::
- :attr:`reduction` = ``'mean'`` doesn't return the true KL divergence value. Please use
- :attr:`reduction` = ``'batchmean'``, which aligns with the mathematical definition of KL divergence.
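- Example (an illustrative sketch; ``input`` is in log space, ``target`` is not)::
- >>> input = F.log_softmax(torch.randn(3, 5), dim=1)
- >>> target = F.softmax(torch.randn(3, 5), dim=1)
- >>> loss = F.kl_div(input, target, reduction='batchmean')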
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- kl_div,
- (input, target),
- input,
- target,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- log_target=log_target,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- if reduction == "mean":
- warnings.warn(
- "reduction: 'mean' divides the total loss by both the batch size and the support size."
- "'batchmean' divides only by the batch size, and aligns with the KL div math definition."
- "'mean' will be changed to behave the same as 'batchmean' in the next major release."
- )
- # special case for batchmean
- if reduction == "batchmean":
- reduction_enum = _Reduction.get_enum("sum")
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target)
- if reduction == "batchmean" and input.dim() != 0:
- reduced = reduced / input.size()[0]
- return reduced
- def cross_entropy(
- input: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- size_average: Optional[bool] = None,
- ignore_index: int = -100,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- label_smoothing: float = 0.0,
- ) -> Tensor:
- r"""Compute the cross entropy loss between input logits and target.
- See :class:`~torch.nn.CrossEntropyLoss` for details.
- Args:
- input (Tensor) : Predicted unnormalized logits;
- see Shape section below for supported shapes.
- target (Tensor) : Ground truth class indices or class probabilities;
- see Shape section below for supported shapes.
- weight (Tensor, optional): a manual rescaling weight given to each
- class. If given, has to be a Tensor of size `C`
- size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
- the losses are averaged over each loss element in the batch. Note that for
- some losses, there are multiple elements per sample. If the field :attr:`size_average`
- is set to ``False``, the losses are instead summed for each minibatch. Ignored
- when reduce is ``False``. Default: ``True``
- ignore_index (int, optional): Specifies a target value that is ignored
- and does not contribute to the input gradient. When :attr:`size_average` is
- ``True``, the loss is averaged over non-ignored targets. Note that
- :attr:`ignore_index` is only applicable when the target contains class indices.
- Default: -100
- reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
- losses are averaged or summed over observations for each minibatch depending
- on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
- batch element instead and ignores :attr:`size_average`. Default: ``True``
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the sum of the output will be divided by the number of
- elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
- and :attr:`reduce` are in the process of being deprecated, and in the meantime,
- specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
- label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount
- of smoothing when computing the loss, where 0.0 means no smoothing. The targets
- become a mixture of the original ground truth and a uniform distribution as described in
- `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
- Shape:
- - Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
- in the case of `K`-dimensional loss.
- - Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with
- :math:`K \geq 1` in the case of K-dimensional loss where each value should be in :math:`[0, C)`.
- If containing class probabilities, same shape as the input and each value should be in :math:`[0, 1]`.
- where:
- .. math::
- \begin{aligned}
- C ={} & \text{number of classes} \\
- N ={} & \text{batch size} \\
- \end{aligned}
- Examples::
- >>> # Example of target with class indices
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randint(5, (3,), dtype=torch.int64)
- >>> loss = F.cross_entropy(input, target)
- >>> loss.backward()
- >>>
- >>> # Example of target with class probabilities
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randn(3, 5).softmax(dim=1)
- >>> loss = F.cross_entropy(input, target)
- >>> loss.backward()
- """
- if has_torch_function_variadic(input, target, weight):
- return handle_torch_function(
- cross_entropy,
- (input, target, weight),
- input,
- target,
- weight=weight,
- size_average=size_average,
- ignore_index=ignore_index,
- reduce=reduce,
- reduction=reduction,
- label_smoothing=label_smoothing,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
- def binary_cross_entropy(
- input: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor:
- r"""Measure Binary Cross Entropy between the target and input probabilities.
- See :class:`~torch.nn.BCELoss` for details.
- Args:
- input: Tensor of arbitrary shape as probabilities.
- target: Tensor of the same shape as input with values between 0 and 1.
- weight (Tensor, optional): a manual rescaling weight. If provided,
- it is repeated to match the input tensor shape.
- size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
- the losses are averaged over each loss element in the batch. Note that for
- some losses, there are multiple elements per sample. If the field :attr:`size_average`
- is set to ``False``, the losses are instead summed for each minibatch. Ignored
- when reduce is ``False``. Default: ``True``
- reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
- losses are averaged or summed over observations for each minibatch depending
- on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
- batch element instead and ignores :attr:`size_average`. Default: ``True``
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the sum of the output will be divided by the number of
- elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
- and :attr:`reduce` are in the process of being deprecated, and in the meantime,
- specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
- Examples::
- >>> input = torch.randn(3, 2, requires_grad=True)
- >>> target = torch.rand(3, 2, requires_grad=False)
- >>> loss = F.binary_cross_entropy(torch.sigmoid(input), target)
- >>> loss.backward()
- """
- if has_torch_function_variadic(input, target, weight):
- return handle_torch_function(
- binary_cross_entropy,
- (input, target, weight),
- input,
- target,
- weight=weight,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- if target.size() != input.size():
- raise ValueError(
- f"Using a target size ({target.size()}) that is different to the input size ({input.size()}) is deprecated. "
- "Please ensure they have the same size."
- )
- if weight is not None:
- new_size = _infer_size(target.size(), weight.size())
- weight = weight.expand(new_size)
- return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)
- def binary_cross_entropy_with_logits(
- input: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- pos_weight: Optional[Tensor] = None,
- ) -> Tensor:
- r"""Calculate Binary Cross Entropy between target and input logits.
- See :class:`~torch.nn.BCEWithLogitsLoss` for details.
- Args:
- input: Tensor of arbitrary shape as unnormalized scores (often referred to as logits).
- target: Tensor of the same shape as input with values between 0 and 1
- weight (Tensor, optional): a manual rescaling weight. If provided,
- it is repeated to match the input tensor shape.
- size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
- the losses are averaged over each loss element in the batch. Note that for
- some losses, there are multiple elements per sample. If the field :attr:`size_average`
- is set to ``False``, the losses are instead summed for each minibatch. Ignored
- when reduce is ``False``. Default: ``True``
- reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
- losses are averaged or summed over observations for each minibatch depending
- on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
- batch element instead and ignores :attr:`size_average`. Default: ``True``
- reduction (str, optional): Specifies the reduction to apply to the output:
- ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
- ``'mean'``: the sum of the output will be divided by the number of
- elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
- and :attr:`reduce` are in the process of being deprecated, and in the meantime,
- specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
- pos_weight (Tensor, optional): a weight of positive examples to be broadcasted with target.
- Must be a tensor with equal size along the class dimension to the number of classes.
- Pay close attention to PyTorch's broadcasting semantics in order to achieve the desired
- operations. For a target of size [B, C, H, W] (where B is batch size), a pos_weight of
- size [B, C, H, W] will apply a different pos_weight to each element of the batch, while
- one of size [C, H, W] will apply the same pos_weight across the batch. To apply the same
- positive weight along all spatial dimensions for a 2D multi-class target [C, H, W], use: [C, 1, 1].
- Default: ``None``
- Examples::
- >>> input = torch.randn(3, requires_grad=True)
- >>> target = torch.empty(3).random_(2)
- >>> loss = F.binary_cross_entropy_with_logits(input, target)
- >>> loss.backward()
- """
- if has_torch_function_variadic(input, target, weight, pos_weight):
- return handle_torch_function(
- binary_cross_entropy_with_logits,
- (input, target, weight, pos_weight),
- input,
- target,
- weight=weight,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- pos_weight=pos_weight,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- if not (target.size() == input.size()):
- raise ValueError(f"Target size ({target.size()}) must be the same as input size ({input.size()})")
- return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
- def smooth_l1_loss(
- input: Tensor,
- target: Tensor,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- beta: float = 1.0,
- ) -> Tensor:
- r"""Compute the Smooth L1 loss.
- Function uses a squared term if the absolute
- element-wise error falls below beta and an L1 term otherwise.
- See :class:`~torch.nn.SmoothL1Loss` for details.
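- Example (an illustrative sketch)::
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randn(3, 5)
- >>> loss = F.smooth_l1_loss(input, target)
- >>> loss.backward()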
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- smooth_l1_loss,
- (input, target),
- input,
- target,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- beta=beta,
- )
- if not (target.size() == input.size()):
- warnings.warn(
- f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
- "This will likely lead to incorrect results due to broadcasting. "
- "Please ensure they have the same size.",
- stacklevel=2,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- expanded_input, expanded_target = torch.broadcast_tensors(input, target)
- if beta == 0.0:
- return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
- else:
- return torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), beta)
- def huber_loss(
- input: Tensor,
- target: Tensor,
- reduction: str = 'mean',
- delta: float = 1.0,
- ) -> Tensor:
- r"""Compute the Huber loss.
- Function uses a squared term if the absolute
- element-wise error falls below delta and a delta-scaled L1 term otherwise.
- When delta equals 1, this loss is equivalent to SmoothL1Loss.
- In general, Huber loss differs from SmoothL1Loss by a factor of delta (AKA beta in Smooth L1).
- See :class:`~torch.nn.HuberLoss` for details.
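- Example (an illustrative sketch)::
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randn(3, 5)
- >>> loss = F.huber_loss(input, target, delta=0.5)
- >>> loss.backward()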
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- huber_loss,
- (input, target),
- input,
- target,
- reduction=reduction,
- delta=delta,
- )
- if not (target.size() == input.size()):
- warnings.warn(f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
- "This will likely lead to incorrect results due to broadcasting. "
- "Please ensure they have the same size.",
- stacklevel=2)
- expanded_input, expanded_target = torch.broadcast_tensors(input, target)
- return torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), delta)
- def l1_loss(
- input: Tensor,
- target: Tensor,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
- Function that takes the mean element-wise absolute value difference.
- See :class:`~torch.nn.L1Loss` for details.
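- Example (an illustrative sketch)::
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randn(3, 5)
- >>> loss = F.l1_loss(input, target)
- >>> loss.backward()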
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- l1_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction
- )
- if not (target.size() == input.size()):
- warnings.warn(
- f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
- "This will likely lead to incorrect results due to broadcasting. "
- "Please ensure they have the same size.",
- stacklevel=2,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- expanded_input, expanded_target = torch.broadcast_tensors(input, target)
- return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
- def mse_loss(
- input: Tensor,
- target: Tensor,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
- Measures the element-wise mean squared error.
- See :class:`~torch.nn.MSELoss` for details.
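- Example (an illustrative sketch)::
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randn(3, 5)
- >>> loss = F.mse_loss(input, target)
- >>> loss.backward()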
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- mse_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction
- )
- if not (target.size() == input.size()):
- warnings.warn(
- f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
- "This will likely lead to incorrect results due to broadcasting. "
- "Please ensure they have the same size.",
- stacklevel=2,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- expanded_input, expanded_target = torch.broadcast_tensors(input, target)
- return torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
- def margin_ranking_loss(
- input1: Tensor,
- input2: Tensor,
- target: Tensor,
- margin: float = 0,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.MarginRankingLoss` for details.
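- Example (illustrative; ``target`` is 1 where ``input1`` should rank higher, -1 otherwise)::
- >>> input1 = torch.randn(3, requires_grad=True)
- >>> input2 = torch.randn(3, requires_grad=True)
- >>> target = torch.tensor([1.0, -1.0, 1.0])
- >>> loss = F.margin_ranking_loss(input1, input2, target)
- >>> loss.backward()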
- """
- if has_torch_function_variadic(input1, input2, target):
- return handle_torch_function(
- margin_ranking_loss,
- (input1, input2, target),
- input1,
- input2,
- target,
- margin=margin,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- if (input1.dim() != input2.dim() or input1.dim() != target.dim()):
- raise RuntimeError(
- f"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
- f"input1: {input1.size()}, input2: {input2.size()}, target: {target.size()} "
- )
- return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)
- def hinge_embedding_loss(
- input: Tensor,
- target: Tensor,
- margin: float = 1.0,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.HingeEmbeddingLoss` for details.
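- Example (an illustrative sketch; targets are +1 or -1)::
- >>> input = torch.randn(4, requires_grad=True)
- >>> target = torch.tensor([1.0, -1.0, 1.0, -1.0])
- >>> loss = F.hinge_embedding_loss(input, target)
- >>> loss.backward()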
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- hinge_embedding_loss,
- (input, target),
- input,
- target,
- margin=margin,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- return torch.hinge_embedding_loss(input, target, margin, reduction_enum)
- def multilabel_margin_loss(
- input: Tensor,
- target: Tensor,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.MultiLabelMarginLoss` for details.
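- Example (illustrative; targets hold class indices, padded with -1)::
- >>> input = torch.randn(2, 4, requires_grad=True)
- >>> target = torch.tensor([[3, 0, -1, -1], [1, 2, 3, -1]])
- >>> loss = F.multilabel_margin_loss(input, target)
- >>> loss.backward()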
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- multilabel_margin_loss,
- (input, target),
- input,
- target,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
- def soft_margin_loss(
- input: Tensor,
- target: Tensor,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""
- soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.SoftMarginLoss` for details.
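- Example (an illustrative sketch; targets are +1 or -1)::
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.randn(3, 5).sign()
- >>> loss = F.soft_margin_loss(input, target)
- >>> loss.backward()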
- """
- if has_torch_function_variadic(input, target):
- return handle_torch_function(
- soft_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- return torch._C._nn.soft_margin_loss(input, target, reduction_enum)
- def multilabel_soft_margin_loss(
- input: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""multilabel_soft_margin_loss(input, target, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
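- Example (illustrative; targets are multi-hot vectors of 0s and 1s)::
- >>> input = torch.randn(3, 4, requires_grad=True)
- >>> target = torch.empty(3, 4).random_(2)
- >>> loss = F.multilabel_soft_margin_loss(input, target)
- >>> loss.backward()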
- """
- if has_torch_function_variadic(input, target, weight):
- return handle_torch_function(
- multilabel_soft_margin_loss,
- (input, target, weight),
- input,
- target,
- weight=weight,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_string(size_average, reduce)
- loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input))
- if weight is not None:
- loss = loss * weight
- class_dim = input.dim() - 1
- C = input.size(class_dim)
- loss = loss.sum(dim=class_dim) / C # only return N loss values
- if reduction == "none":
- ret = loss
- elif reduction == "mean":
- ret = loss.mean()
- elif reduction == "sum":
- ret = loss.sum()
- else:
- ret = input
- raise ValueError(reduction + " is not valid")
- return ret
- def cosine_embedding_loss(
- input1: Tensor,
- input2: Tensor,
- target: Tensor,
- margin: float = 0,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.CosineEmbeddingLoss` for details.
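- Example (illustrative; ``target`` is 1 for similar pairs and -1 for dissimilar ones)::
- >>> input1 = torch.randn(3, 5, requires_grad=True)
- >>> input2 = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.tensor([1.0, -1.0, 1.0])
- >>> loss = F.cosine_embedding_loss(input1, input2, target)
- >>> loss.backward()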
- """
- if has_torch_function_variadic(input1, input2, target):
- return handle_torch_function(
- cosine_embedding_loss,
- (input1, input2, target),
- input1,
- input2,
- target,
- margin=margin,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)
- def multi_margin_loss(
- input: Tensor,
- target: Tensor,
- p: int = 1,
- margin: float = 1.0,
- weight: Optional[Tensor] = None,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor: # noqa: D400,D402
- r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor
- See :class:`~torch.nn.MultiMarginLoss` for details.
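- Example (an illustrative sketch; targets are class indices)::
- >>> input = torch.randn(3, 5, requires_grad=True)
- >>> target = torch.tensor([1, 0, 4])
- >>> loss = F.multi_margin_loss(input, target)
- >>> loss.backward()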
- """
- if has_torch_function_variadic(input, target, weight):
- return handle_torch_function(
- multi_margin_loss,
- (input, target, weight),
- input,
- target,
- p=p,
- margin=margin,
- weight=weight,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- if p != 1 and p != 2:
- raise ValueError("only p == 1 and p == 2 supported")
- if weight is not None:
- if weight.dim() != 1:
- raise ValueError("weight must be one-dimensional")
- return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)
- pixel_shuffle = _add_docstr(
- torch.pixel_shuffle,
- r"""
- pixel_shuffle(input, upscale_factor) -> Tensor
- Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a
- tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is the :attr:`upscale_factor`.
- See :class:`~torch.nn.PixelShuffle` for details.
- Args:
- input (Tensor): the input tensor
- upscale_factor (int): factor to increase spatial resolution by
- Examples::
- >>> input = torch.randn(1, 9, 4, 4)
- >>> output = torch.nn.functional.pixel_shuffle(input, 3)
- >>> print(output.size())
- torch.Size([1, 1, 12, 12])
- """,
- )
- pixel_unshuffle = _add_docstr(
- torch.pixel_unshuffle,
- r"""
- pixel_unshuffle(input, downscale_factor) -> Tensor
- Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a
- tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
- :math:`(*, C \times r^2, H, W)`, where r is the :attr:`downscale_factor`.
- See :class:`~torch.nn.PixelUnshuffle` for details.
- Args:
- input (Tensor): the input tensor
- downscale_factor (int): factor to decrease spatial resolution by
- Examples::
- >>> input = torch.randn(1, 1, 12, 12)
- >>> output = torch.nn.functional.pixel_unshuffle(input, 3)
- >>> print(output.size())
- torch.Size([1, 9, 4, 4])
- """,
- )
- channel_shuffle = _add_docstr(
- torch.channel_shuffle,
- r"""
- channel_shuffle(input, groups) -> Tensor
- Divide the channels in a tensor of shape :math:`(*, C , H, W)`
- into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
- while keeping the original tensor shape.
- See :class:`~torch.nn.ChannelShuffle` for details.
- Args:
- input (Tensor): the input tensor
- groups (int): number of groups to divide channels in and rearrange.
- Examples::
- >>> input = torch.arange(1, 17).view(1, 4, 2, 2)
- >>> print(input)
- [[[[1, 2],
- [3, 4]],
- [[5, 6],
- [7, 8]],
- [[9, 10],
- [11, 12]],
- [[13, 14],
- [15, 16]],
- ]]
- >>> output = torch.nn.functional.channel_shuffle(input, 2)
- >>> print(output)
- [[[[1, 2],
- [3, 4]],
- [[9, 10],
- [11, 12]],
- [[5, 6],
- [7, 8]],
- [[13, 14],
- [15, 16]],
- ]]
- """,
- )
- native_channel_shuffle = _add_docstr(
- torch.native_channel_shuffle,
- r"""
- native_channel_shuffle(input, groups) -> Tensor
- Native kernel-level implementation of ``channel_shuffle``.
- This function might become private in future releases; use with caution.
- Divide the channels in a tensor of shape :math:`(*, C , H, W)`
- into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
- while keeping the original tensor shape.
- See :class:`~torch.nn.ChannelShuffle` for details.
- Args:
- input (Tensor): the input tensor
- groups (int): number of groups to divide channels in and rearrange.
- Examples::
- >>> input = torch.arange(1, 17).view(1, 4, 2, 2)
- >>> print(input)
- [[[[1, 2],
- [3, 4]],
- [[5, 6],
- [7, 8]],
- [[9, 10],
- [11, 12]],
- [[13, 14],
- [15, 16]],
- ]]
- >>> output = torch.nn.functional.native_channel_shuffle(input, 2)
- >>> print(output)
- [[[[1, 2],
- [3, 4]],
- [[9, 10],
- [11, 12]],
- [[5, 6],
- [7, 8]],
- [[13, 14],
- [15, 16]],
- ]]
- """,
- )
- @_overload # noqa: F811
- def upsample(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = "nearest", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811,B950
- pass
- @_overload # noqa: F811
- def upsample(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None, mode: str = "nearest", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811,B950
- pass
- def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # noqa: F811
- r"""Upsample input.
- Provided tensor is upsampled to either the given :attr:`size` or the given
- :attr:`scale_factor`
- .. warning::
- This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
- This is equivalent to ``nn.functional.interpolate(...)``.
- Note:
- {backward_reproducibility_note}
- The algorithm used for upsampling is determined by :attr:`mode`.
- Currently temporal, spatial and volumetric upsampling are supported, i.e.
- expected inputs are 3-D, 4-D or 5-D in shape.
- The input dimensions are interpreted in the form:
- `mini-batch x channels x [optional depth] x [optional height] x width`.
- The modes available for upsampling are: `nearest`, `linear` (3D-only),
- `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only)
- Args:
- input (Tensor): the input tensor
- size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
- output spatial size.
- scale_factor (float or Tuple[float]): multiplier for spatial size. If a tuple, its length has to match the number of spatial dimensions.
- mode (str): algorithm used for upsampling:
- ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
- ``'trilinear'``. Default: ``'nearest'``
- align_corners (bool, optional): Geometrically, we consider the pixels of the
- input and output as squares rather than points.
- If set to ``True``, the input and output tensors are aligned by the
- center points of their corner pixels, preserving the values at the corner pixels.
- If set to ``False``, the input and output tensors are aligned by the corner
- points of their corner pixels, and the interpolation uses edge value padding
- for out-of-boundary values, making this operation *independent* of input size
- when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
- is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
- Default: ``False``
- .. note::
- With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce
- negative values or values greater than 255 for images.
- Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot
- when displaying the image.
- .. warning::
- With ``align_corners = True``, the linearly interpolating modes
- (`linear`, `bilinear`, and `trilinear`) don't proportionally align the
- output and input pixels, and thus the output values can depend on the
- input size. This was the default behavior for these modes up to version
- 0.3.1. Since then, the default behavior is ``align_corners = False``.
- See :class:`~torch.nn.Upsample` for concrete examples on how this
- affects the outputs.
- """
- warnings.warn(
- "`nn.functional.upsample` is deprecated. "
- "Use `nn.functional.interpolate` instead.",
- stacklevel=2,
- )
- return interpolate(input, size, scale_factor, mode, align_corners)
- if upsample.__doc__:
- upsample.__doc__ = upsample.__doc__.format(**reproducibility_notes)
- def _is_integer(x) -> bool:
- r"""Type check the input number is an integer.
- Will return True for int, SymInt, Numpy integers and Tensors with integer elements.
- """
- if isinstance(x, (int, torch.SymInt)):
- return True
- if np is not None and isinstance(x, np.integer):
- return True
- return isinstance(x, Tensor) and not x.is_floating_point()
- @_overload # noqa: F811
- def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950
- pass
- @_overload # noqa: F811
- def interpolate(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950
- pass
- @_overload # noqa: F811
- def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950
- pass
- @_overload # noqa: F811
- def interpolate( # noqa: F811
- input: Tensor,
- size: Optional[List[int]] = None,
- scale_factor: Optional[float] = None,
- mode: str = "nearest",
- align_corners: Optional[bool] = None,
- recompute_scale_factor: Optional[bool] = None,
- antialias: bool = False,
- ) -> Tensor: # noqa: F811
- pass
- def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950
- r"""Down/up samples the input.
- Tensor interpolated to either the given :attr:`size` or the given
- :attr:`scale_factor`
- The algorithm used for interpolation is determined by :attr:`mode`.
- Currently temporal, spatial and volumetric sampling are supported, i.e.
- expected inputs are 3-D, 4-D or 5-D in shape.
- The input dimensions are interpreted in the form:
- `mini-batch x channels x [optional depth] x [optional height] x width`.
- The modes available for resizing are: `nearest`, `linear` (3D-only),
- `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only), `area`, `nearest-exact`
- Args:
- input (Tensor): the input tensor
- size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
- output spatial size.
- scale_factor (float or Tuple[float]): multiplier for spatial size. If `scale_factor` is a tuple,
- its length has to match the number of spatial dimensions, i.e. `input.dim() - 2`.
- mode (str): algorithm used for upsampling:
- ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
- ``'trilinear'`` | ``'area'`` | ``'nearest-exact'``. Default: ``'nearest'``
- align_corners (bool, optional): Geometrically, we consider the pixels of the
- input and output as squares rather than points.
- If set to ``True``, the input and output tensors are aligned by the
- center points of their corner pixels, preserving the values at the corner pixels.
- If set to ``False``, the input and output tensors are aligned by the corner
- points of their corner pixels, and the interpolation uses edge value padding
- for out-of-boundary values, making this operation *independent* of input size
- when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
- is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
- Default: ``False``
- recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
- interpolation calculation. If `recompute_scale_factor` is ``True``, then
- `scale_factor` must be passed in and `scale_factor` is used to compute the
- output `size`. The computed output `size` will be used to infer new scales for
- the interpolation. Note that when `scale_factor` is floating-point, it may differ
- from the recomputed `scale_factor` due to rounding and precision issues.
- If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
- be used directly for interpolation. Default: ``None``.
- antialias (bool, optional): flag to apply anti-aliasing. Default: ``False``. When the anti-alias
- option is used together with ``align_corners=False``, the interpolation result matches Pillow's
- result for downsampling operations. Supported modes: ``'bilinear'``, ``'bicubic'``.
- .. note::
- With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce
- negative values or values greater than 255 for images.
- Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot
- when displaying the image.
- .. note::
- Mode ``mode='nearest-exact'`` matches the Scikit-Image and PIL nearest-neighbour interpolation
- algorithms and fixes known issues with ``mode='nearest'``. Mode ``mode='nearest'`` matches the
- buggy OpenCV ``INTER_NEAREST`` interpolation algorithm and is kept for backward compatibility.
- .. note::
- The gradients for the dtype ``float16`` on CUDA may be inaccurate in the upsample operation
- when using modes ``['linear', 'bilinear', 'bicubic', 'trilinear', 'area']``.
- For more details, please refer to the discussion in
- `issue#104157 <https://github.com/pytorch/pytorch/issues/104157>`_.
- Note:
- {backward_reproducibility_note}
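- Examples (illustrative; shapes chosen for demonstration)::
- >>> x = torch.randn(1, 3, 8, 8)
- >>> F.interpolate(x, scale_factor=2.0, mode='nearest').shape
- torch.Size([1, 3, 16, 16])
- >>> F.interpolate(x, size=(4, 4), mode='bilinear', align_corners=False).shape
- torch.Size([1, 3, 4, 4])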
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- interpolate,
- (input,),
- input,
- size=size,
- scale_factor=scale_factor,
- mode=mode,
- align_corners=align_corners,
- recompute_scale_factor=recompute_scale_factor,
- antialias=antialias
- )
- if mode in ("nearest", "area", "nearest-exact"):
- if align_corners is not None:
- raise ValueError(
- "align_corners option can only be set with the "
- "interpolating modes: linear | bilinear | bicubic | trilinear"
- )
- else:
- if align_corners is None:
- align_corners = False
- dim = input.dim() - 2 # Number of spatial dimensions.
- # Process size and scale_factor. Validate that exactly one is set.
- # Validate its length if it is a list, or expand it if it is a scalar.
- # After this block, exactly one of output_size and scale_factors will
- # be non-None, and it will be a list (or tuple).
- if size is not None and scale_factor is not None:
- raise ValueError("only one of size or scale_factor should be defined")
- elif size is not None:
- assert scale_factor is None
- scale_factors = None
- if isinstance(size, (list, tuple)):
- if len(size) != dim:
- raise ValueError(
- "Input and output must have the same number of spatial dimensions, but got "
- f"input with spatial dimensions of {list(input.shape[2:])} and output size of {size}. "
- "Please provide input tensor in (N, C, d1, d2, ...,dK) format and "
- "output size in (o1, o2, ...,oK) format."
- )
- if not torch.jit.is_scripting():
- if not all(_is_integer(x) for x in size):
- raise TypeError(
- "expected size to be one of int or Tuple[int] or Tuple[int, int] or "
- f"Tuple[int, int, int], but got size with types {[type(x) for x in size]}"
- )
- output_size = size
- else:
- output_size = [size for _ in range(dim)]
- elif scale_factor is not None:
- assert size is None
- output_size = None
- if isinstance(scale_factor, (list, tuple)):
- if len(scale_factor) != dim:
- raise ValueError(
- "Input and scale_factor must have the same number of spatial dimensions, but "
- f"got input with spatial dimensions of {list(input.shape[2:])} and "
- f"scale_factor of shape {scale_factor}. "
- "Please provide input tensor in (N, C, d1, d2, ...,dK) format and "
- "scale_factor in (s1, s2, ...,sK) format."
- )
- scale_factors = scale_factor
- else:
- scale_factors = [scale_factor for _ in range(dim)]
- else:
- raise ValueError("either size or scale_factor should be defined")
- if recompute_scale_factor is not None and recompute_scale_factor and size is not None:
- raise ValueError("recompute_scale_factor is not meaningful with an explicit size.")
- # "area" mode always requires an explicit size rather than scale factor.
- # Re-use the recompute_scale_factor code path.
- if mode == "area" and output_size is None:
- recompute_scale_factor = True
- if recompute_scale_factor is not None and recompute_scale_factor:
- # We compute output_size here, then un-set scale_factors.
- # The C++ code will recompute it based on the (integer) output size.
- assert scale_factors is not None
- if not torch.jit.is_scripting() and torch._C._get_tracing_state():
- # make scale_factor a tensor in tracing so constant doesn't get baked in
- output_size = [
- (torch.floor((input.size(i + 2).float() * torch.tensor(scale_factors[i], dtype=torch.float32)).float()))
- for i in range(dim)
- ]
- elif torch.jit.is_scripting():
- output_size = [int(math.floor(float(input.size(i + 2)) * scale_factors[i]))
- for i in range(dim)]
- else:
- output_size = [
- _sym_int(input.size(i + 2) * scale_factors[i])
- for i in range(dim)
- ]
- scale_factors = None
- if antialias and not (mode in ("bilinear", "bicubic") and input.ndim == 4):
- raise ValueError("Anti-alias option is restricted to bilinear and bicubic modes and requires a 4-D tensor as input")
- if input.dim() == 3 and mode == "nearest":
- return torch._C._nn.upsample_nearest1d(input, output_size, scale_factors)
- if input.dim() == 4 and mode == "nearest":
- return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors)
- if input.dim() == 5 and mode == "nearest":
- return torch._C._nn.upsample_nearest3d(input, output_size, scale_factors)
- if input.dim() == 3 and mode == "nearest-exact":
- return torch._C._nn._upsample_nearest_exact1d(input, output_size, scale_factors)
- if input.dim() == 4 and mode == "nearest-exact":
- return torch._C._nn._upsample_nearest_exact2d(input, output_size, scale_factors)
- if input.dim() == 5 and mode == "nearest-exact":
- return torch._C._nn._upsample_nearest_exact3d(input, output_size, scale_factors)
- if input.dim() == 3 and mode == "area":
- assert output_size is not None
- return adaptive_avg_pool1d(input, output_size)
- if input.dim() == 4 and mode == "area":
- assert output_size is not None
- return adaptive_avg_pool2d(input, output_size)
- if input.dim() == 5 and mode == "area":
- assert output_size is not None
- return adaptive_avg_pool3d(input, output_size)
- if input.dim() == 3 and mode == "linear":
- assert align_corners is not None
- return torch._C._nn.upsample_linear1d(input, output_size, align_corners, scale_factors)
- if input.dim() == 4 and mode == "bilinear":
- assert align_corners is not None
- if antialias:
- return torch._C._nn._upsample_bilinear2d_aa(input, output_size, align_corners, scale_factors)
- # Two levels are necessary to prevent TorchScript from touching
- # are_deterministic_algorithms_enabled.
- if not torch.jit.is_scripting():
- if torch.are_deterministic_algorithms_enabled() and input.is_cuda:
- # Use slow decomp whose backward will be in terms of index_put
- # importlib is required because the import cannot be top level
- # (cycle) and cannot be nested (TS doesn't support)
- return importlib.import_module('torch._decomp.decompositions')._upsample_linear_vec(
- input, output_size, align_corners, scale_factors)
- return torch._C._nn.upsample_bilinear2d(input, output_size, align_corners, scale_factors)
- if input.dim() == 5 and mode == "trilinear":
- assert align_corners is not None
- return torch._C._nn.upsample_trilinear3d(input, output_size, align_corners, scale_factors)
- if input.dim() == 4 and mode == "bicubic":
- assert align_corners is not None
- if antialias:
- return torch._C._nn._upsample_bicubic2d_aa(input, output_size, align_corners, scale_factors)
- return torch._C._nn.upsample_bicubic2d(input, output_size, align_corners, scale_factors)
- if input.dim() == 3 and mode == "bilinear":
- raise NotImplementedError("Got 3D input, but bilinear mode needs 4D input")
- if input.dim() == 3 and mode == "trilinear":
- raise NotImplementedError("Got 3D input, but trilinear mode needs 5D input")
- if input.dim() == 4 and mode == "linear":
- raise NotImplementedError("Got 4D input, but linear mode needs 3D input")
- if input.dim() == 4 and mode == "trilinear":
- raise NotImplementedError("Got 4D input, but trilinear mode needs 5D input")
- if input.dim() == 5 and mode == "linear":
- raise NotImplementedError("Got 5D input, but linear mode needs 3D input")
- if input.dim() == 5 and mode == "bilinear":
- raise NotImplementedError("Got 5D input, but bilinear mode needs 4D input")
- raise NotImplementedError(
- "Input Error: Only 3D, 4D and 5D input Tensors supported"
- f" (got {input.dim()}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
- f" (got {mode})"
- )
- if interpolate.__doc__:
- interpolate.__doc__ = interpolate.__doc__.format(**reproducibility_notes)
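- # A hedged usage sketch (added for illustration, not part of the original file),
- # assuming `import torch` and `import torch.nn.functional as F`; tensor sizes
- # are arbitrary choices.
- # >>> x = torch.rand(1, 3, 16, 16)
- # >>> F.interpolate(x, size=(32, 32), mode="nearest").shape
- # torch.Size([1, 3, 32, 32])
- # >>> F.interpolate(x, scale_factor=0.5, mode="bilinear", align_corners=False, antialias=True).shape
- # torch.Size([1, 3, 8, 8])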
- @_overload # noqa: F811
- def upsample_nearest(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811
- pass
- @_overload # noqa: F811
- def upsample_nearest(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811
- pass
- def upsample_nearest(input, size=None, scale_factor=None): # noqa: F811
- r"""Upsamples the input, using nearest neighbours' pixel values.
- .. warning::
- This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
- This is equivalent to ``nn.functional.interpolate(..., mode='nearest')``.
- Currently spatial and volumetric upsampling are supported (i.e. expected
- inputs are 4 or 5 dimensional).
- Args:
- input (Tensor): input
- size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
- size.
- scale_factor (int): multiplier for spatial size. Has to be an integer.
- Note:
- {backward_reproducibility_note}
- """
- # DeprecationWarning is ignored by default
- warnings.warn(
- "`nn.functional.upsample_nearest` is deprecated. "
- "Use `nn.functional.interpolate` instead.",
- stacklevel=2,
- )
- return interpolate(input, size, scale_factor, mode="nearest")
- if upsample_nearest.__doc__:
- upsample_nearest.__doc__ = upsample_nearest.__doc__.format(**reproducibility_notes)
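- # Hedged sketch of the deprecation equivalence documented above (assumes
- # `import torch` and `import torch.nn.functional as F`; sizes are arbitrary).
- # >>> x = torch.rand(1, 1, 4, 4)
- # >>> old = F.upsample_nearest(x, scale_factor=2)  # emits a deprecation warning
- # >>> new = F.interpolate(x, scale_factor=2, mode="nearest")
- # >>> torch.equal(old, new)
- # True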
- @_overload # noqa: F811
- def upsample_bilinear(
- input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None
- ) -> Tensor: # noqa: F811
- pass
- @_overload # noqa: F811
- def upsample_bilinear( # noqa: F811
- input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None
- ) -> Tensor: # noqa: F811
- pass
- @_overload # noqa: F811
- def upsample_bilinear( # noqa: F811
- input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None
- ) -> Tensor: # noqa: F811
- pass
- @_overload # noqa: F811
- def upsample_bilinear( # noqa: F811
- input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None
- ) -> Tensor: # noqa: F811
- pass
- def upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811
- r"""Upsamples the input, using bilinear upsampling.
- .. warning::
- This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
- This is equivalent to
- ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
- Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` for
- volumetric (5 dimensional) inputs.
- Args:
- input (Tensor): input
- size (int or Tuple[int, int]): output spatial size.
- scale_factor (int or Tuple[int, int]): multiplier for spatial size
- Note:
- {backward_reproducibility_note}
- """
- # DeprecationWarning is ignored by default
- warnings.warn(
- "`nn.functional.upsample_bilinear` is deprecated. "
- "Use `nn.functional.interpolate` instead.",
- stacklevel=2,
- )
- return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True)
- if upsample_bilinear.__doc__:
- upsample_bilinear.__doc__ = upsample_bilinear.__doc__.format(**reproducibility_notes)
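- # Hedged sketch of the documented equivalence (assumes `import torch` and
- # `import torch.nn.functional as F`; sizes are arbitrary).
- # >>> x = torch.rand(1, 3, 8, 8)
- # >>> a = F.upsample_bilinear(x, scale_factor=2)  # deprecated path
- # >>> b = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- # >>> torch.equal(a, b)
- # True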
- GRID_SAMPLE_INTERPOLATION_MODES = {
- "bilinear": 0,
- "nearest": 1,
- "bicubic": 2,
- }
- GRID_SAMPLE_PADDING_MODES = {
- "zeros": 0,
- "border": 1,
- "reflection": 2,
- }
- def grid_sample(
- input: Tensor,
- grid: Tensor,
- mode: str = "bilinear",
- padding_mode: str = "zeros",
- align_corners: Optional[bool] = None,
- ) -> Tensor:
- r"""Compute grid sample.
- Given an :attr:`input` and a flow-field :attr:`grid`, computes the
- ``output`` using :attr:`input` values and pixel locations from :attr:`grid`.
- Currently, only spatial (4-D) and volumetric (5-D) :attr:`input` are
- supported.
- In the spatial (4-D) case, for :attr:`input` with shape
- :math:`(N, C, H_\text{in}, W_\text{in})` and :attr:`grid` with shape
- :math:`(N, H_\text{out}, W_\text{out}, 2)`, the output will have shape
- :math:`(N, C, H_\text{out}, W_\text{out})`.
- For each output location ``output[n, :, h, w]``, the size-2 vector
- ``grid[n, h, w]`` specifies :attr:`input` pixel locations ``x`` and ``y``,
- which are used to interpolate the output value ``output[n, :, h, w]``.
- In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the
- ``x``, ``y``, ``z`` pixel locations for interpolating
- ``output[n, :, d, h, w]``. The :attr:`mode` argument specifies the ``nearest`` or
- ``bilinear`` interpolation method used to sample the input pixels.
- :attr:`grid` specifies the sampling pixel locations normalized by the
- :attr:`input` spatial dimensions. Therefore, it should have most values in
- the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` is the
- left-top pixel of :attr:`input`, and values ``x = 1, y = 1`` is the
- right-bottom pixel of :attr:`input`.
- If :attr:`grid` has values outside the range of ``[-1, 1]``, the corresponding
- outputs are handled as defined by :attr:`padding_mode`. Options are
- * ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations,
- * ``padding_mode="border"``: use border values for out-of-bound grid locations,
- * ``padding_mode="reflection"``: use values at locations reflected by
- the border for out-of-bound grid locations. For locations far away
- from the border, they will keep being reflected until they are in bounds,
- e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1``
- and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes
- ``x'' = -0.5``.
- Note:
- This function is often used in conjunction with :func:`affine_grid`
- to build `Spatial Transformer Networks`_ .
- Note:
- When using the CUDA backend, this operation may induce nondeterministic
- behaviour in its backward pass that is not easily switched off.
- Please see the notes on :doc:`/notes/randomness` for background.
- Note:
- NaN values in :attr:`grid` will be interpreted as ``-1``.
- Args:
- input (Tensor): input of shape :math:`(N, C, H_\text{in}, W_\text{in})` (4-D case)
- or :math:`(N, C, D_\text{in}, H_\text{in}, W_\text{in})` (5-D case)
- grid (Tensor): flow-field of shape :math:`(N, H_\text{out}, W_\text{out}, 2)` (4-D case)
- or :math:`(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)` (5-D case)
- mode (str): interpolation mode to calculate output values
- ``'bilinear'`` | ``'nearest'`` | ``'bicubic'``. Default: ``'bilinear'``
- Note: ``mode='bicubic'`` supports only 4-D input.
- When ``mode='bilinear'`` and the input is 5-D, the interpolation mode
- used internally will actually be trilinear. However, when the input is 4-D,
- the interpolation mode will legitimately be bilinear.
- padding_mode (str): padding mode for outside grid values
- ``'zeros'`` | ``'border'`` | ``'reflection'``. Default: ``'zeros'``
- align_corners (bool, optional): Geometrically, we consider the pixels of the
- input as squares rather than points.
- If set to ``True``, the extrema (``-1`` and ``1``) are considered as referring
- to the center points of the input's corner pixels. If set to ``False``, they
- are instead considered as referring to the corner points of the input's corner
- pixels, making the sampling more resolution agnostic.
- This option parallels the ``align_corners`` option in
- :func:`interpolate`, and so whichever option is used here
- should also be used there to resize the input image before grid sampling.
- Default: ``False``
- Returns:
- output (Tensor): output Tensor
- .. _`Spatial Transformer Networks`:
- https://arxiv.org/abs/1506.02025
- .. warning::
- When ``align_corners = True``, the grid positions depend on the pixel
- size relative to the input image size, and so the locations sampled by
- :func:`grid_sample` will differ for the same input given at different
- resolutions (that is, after being upsampled or downsampled).
- The default behavior up to version 1.2.0 was ``align_corners = True``.
- Since then, the default behavior has been changed to ``align_corners = False``,
- in order to bring it in line with the default for :func:`interpolate`.
- .. note::
- ``mode='bicubic'`` is implemented using the `cubic convolution algorithm`_ with :math:`\alpha=-0.75`.
- The constant :math:`\alpha` may differ between packages.
- For example, `PIL`_ and `OpenCV`_ use -0.5 and -0.75 respectively.
- This algorithm may "overshoot" the range of values it's interpolating.
- For example, it may produce negative values or values greater than 255 when interpolating input in [0, 255].
- Clamp the results with :func:`torch.clamp` to ensure they are within the valid range.
- .. _`cubic convolution algorithm`: https://en.wikipedia.org/wiki/Bicubic_interpolation
- .. _`PIL`: https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/src/libImaging/Resample.c#L51
- .. _`OpenCV`: https://github.com/opencv/opencv/blob/f345ed564a06178670750bad59526cfa4033be55/modules/imgproc/src/resize.cpp#L908
- """
- if has_torch_function_variadic(input, grid):
- return handle_torch_function(
- grid_sample, (input, grid), input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners
- )
- if mode != "bilinear" and mode != "nearest" and mode != "bicubic":
- raise ValueError(
- f"nn.functional.grid_sample(): expected mode to be 'bilinear', 'nearest' or 'bicubic', but got: '{mode}'"
- )
- if padding_mode != "zeros" and padding_mode != "border" and padding_mode != "reflection":
- raise ValueError(
- "nn.functional.grid_sample(): expected padding_mode "
- "to be 'zeros', 'border', or 'reflection', "
- f"but got: '{padding_mode}'"
- )
- if mode == "bilinear":
- mode_enum = 0
- elif mode == "nearest":
- mode_enum = 1
- else: # mode == 'bicubic'
- mode_enum = 2
- if padding_mode == "zeros":
- padding_mode_enum = 0
- elif padding_mode == "border":
- padding_mode_enum = 1
- else: # padding_mode == 'reflection'
- padding_mode_enum = 2
- if align_corners is None:
- warnings.warn(
- "Default grid_sample and affine_grid behavior has changed "
- "to align_corners=False since 1.3.0. Please specify "
- "align_corners=True if the old behavior is desired. "
- "See the documentation of grid_sample for details."
- )
- align_corners = False
- return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)
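- # Hedged sketch (illustration only): sampling with an identity affine grid
- # reproduces the input up to floating point error. Assumes `import torch` and
- # `import torch.nn.functional as F`; sizes are arbitrary.
- # >>> x = torch.rand(1, 3, 8, 8)
- # >>> theta = torch.tensor([[[1.0, 0.0, 0.0],
- # ...                        [0.0, 1.0, 0.0]]])  # identity 2D affine, shape (1, 2, 3)
- # >>> grid = F.affine_grid(theta, size=[1, 3, 8, 8], align_corners=False)
- # >>> out = F.grid_sample(x, grid, mode="bilinear", align_corners=False)
- # >>> torch.allclose(out, x, atol=1e-5)
- # True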
- def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] = None) -> Tensor:
- r"""Generate 2D or 3D flow field (sampling grid), given a batch of affine matrices :attr:`theta`.
- .. note::
- This function is often used in conjunction with :func:`grid_sample`
- to build `Spatial Transformer Networks`_ .
- Args:
- theta (Tensor): input batch of affine matrices with shape
- (:math:`N \times 2 \times 3`) for 2D or
- (:math:`N \times 3 \times 4`) for 3D
- size (torch.Size): the target output image size.
- (:math:`N \times C \times H \times W` for 2D or
- :math:`N \times C \times D \times H \times W` for 3D)
- Example: torch.Size((32, 3, 24, 24))
- align_corners (bool, optional): if ``True``, consider ``-1`` and ``1``
- to refer to the centers of the corner pixels rather than the image corners.
- Refer to :func:`grid_sample` for a more complete description.
- A grid generated by :func:`affine_grid` should be passed to :func:`grid_sample`
- with the same setting for this option.
- Default: ``False``
- Returns:
- output (Tensor): output Tensor of size (:math:`N \times H \times W \times 2`) for 2D,
- or (:math:`N \times D \times H \times W \times 3`) for 3D
- .. _`Spatial Transformer Networks`:
- https://arxiv.org/abs/1506.02025
- .. warning::
- When ``align_corners = True``, the grid positions depend on the pixel
- size relative to the input image size, and so the locations sampled by
- :func:`grid_sample` will differ for the same input given at different
- resolutions (that is, after being upsampled or downsampled).
- The default behavior up to version 1.2.0 was ``align_corners = True``.
- Since then, the default behavior has been changed to ``align_corners = False``,
- in order to bring it in line with the default for :func:`interpolate`.
- .. warning::
- When ``align_corners = True``, 2D affine transforms on 1D data and
- 3D affine transforms on 2D data (that is, when one of the spatial
- dimensions has unit size) are ill-defined, and not an intended use case.
- This is not a problem when ``align_corners = False``.
- Up to version 1.2.0, all grid points along a unit dimension were
- considered arbitrarily to be at ``-1``.
- From version 1.3.0, under ``align_corners = True`` all grid points
- along a unit dimension are considered to be at ``0``
- (the center of the input image).
- """
- if has_torch_function_unary(theta):
- return handle_torch_function(affine_grid, (theta,), theta, size, align_corners=align_corners)
- if align_corners is None:
- warnings.warn(
- "Default grid_sample and affine_grid behavior has changed "
- "to align_corners=False since 1.3.0. Please specify "
- "align_corners=True if the old behavior is desired. "
- "See the documentation of grid_sample for details."
- )
- align_corners = False
- # enforce floating point dtype on theta
- if not theta.is_floating_point():
- raise ValueError(f"Expected theta to have floating point type, but got {theta.dtype}")
- # check that shapes and sizes match
- if len(size) == 4:
- if theta.dim() != 3 or theta.shape[-2] != 2 or theta.shape[-1] != 3:
- raise ValueError(
- f"Expected a batch of 2D affine matrices of shape Nx2x3 for size {size}. Got {theta.shape}."
- )
- spatial_size = size[-2:] # spatial dimension sizes
- elif len(size) == 5:
- if theta.dim() != 3 or theta.shape[-2] != 3 or theta.shape[-1] != 4:
- raise ValueError(
- f"Expected a batch of 3D affine matrices of shape Nx3x4 for size {size}. Got {theta.shape}."
- )
- spatial_size = size[-3:] # spatial dimension sizes
- else:
- raise NotImplementedError(
- "affine_grid only supports 4D and 5D sizes, "
- "for 2D and 3D affine transforms, respectively. "
- f"Got size {size}."
- )
- # check for empty span
- if align_corners and min(spatial_size) == 1:
- warnings.warn(
- "Since version 1.3.0, affine_grid behavior has changed "
- "for unit-size grids when align_corners=True. "
- "This is not an intended use case of affine_grid. "
- "See the documentation of affine_grid for details."
- )
- elif min(size) <= 0:
- raise ValueError(f"Expected non-zero, positive output size. Got {size}")
- return torch.affine_grid_generator(theta, size, align_corners)
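- # Hedged sketch of the output grid shapes (illustration only; sizes are
- # arbitrary and `F` abbreviates `torch.nn.functional`).
- # >>> theta2d = torch.rand(4, 2, 3)
- # >>> F.affine_grid(theta2d, size=[4, 3, 10, 12], align_corners=False).shape
- # torch.Size([4, 10, 12, 2])
- # >>> theta3d = torch.rand(4, 3, 4)
- # >>> F.affine_grid(theta3d, size=[4, 3, 5, 10, 12], align_corners=False).shape
- # torch.Size([4, 5, 10, 12, 3])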
- def pad(input: Tensor, pad: List[int], mode: str = "constant", value: Optional[float] = None) -> Tensor:
- r"""
- pad(input, pad, mode="constant", value=None) -> Tensor
- Pads tensor.
- Padding size:
- The padding size by which to pad some dimensions of :attr:`input`
- is described starting from the last dimension and moving forward.
- :math:`\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor` dimensions
- of ``input`` will be padded.
- For example, to pad only the last dimension of the input tensor, then
- :attr:`pad` has the form
- :math:`(\text{padding\_left}, \text{padding\_right})`;
- to pad the last 2 dimensions of the input tensor, then use
- :math:`(\text{padding\_left}, \text{padding\_right},`
- :math:`\text{padding\_top}, \text{padding\_bottom})`;
- to pad the last 3 dimensions, use
- :math:`(\text{padding\_left}, \text{padding\_right},`
- :math:`\text{padding\_top}, \text{padding\_bottom}`
- :math:`\text{padding\_front}, \text{padding\_back})`.
- Padding mode:
- See :class:`torch.nn.CircularPad2d`, :class:`torch.nn.ConstantPad2d`,
- :class:`torch.nn.ReflectionPad2d`, and :class:`torch.nn.ReplicationPad2d`
- for concrete examples on how each of the padding modes works. Constant
- padding is implemented for arbitrary dimensions. Circular, replicate and
- reflection padding are implemented for padding the last 3 dimensions of a
- 4D or 5D input tensor, the last 2 dimensions of a 3D or 4D input tensor,
- or the last dimension of a 2D or 3D input tensor.
- Note:
- When using the CUDA backend, this operation may induce nondeterministic
- behaviour in its backward pass that is not easily switched off.
- Please see the notes on :doc:`/notes/randomness` for background.
- Args:
- input (Tensor): N-dimensional tensor
- pad (tuple): m-element tuple, where
- :math:`\frac{m}{2} \leq` input dimensions and :math:`m` is even.
- mode: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.
- Default: ``'constant'``
- value: fill value for ``'constant'`` padding. Default: ``0``
- Examples::
- >>> t4d = torch.empty(3, 3, 4, 2)
- >>> p1d = (1, 1) # pad last dim by 1 on each side
- >>> out = F.pad(t4d, p1d, "constant", 0) # effectively zero padding
- >>> print(out.size())
- torch.Size([3, 3, 4, 4])
- >>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2)
- >>> out = F.pad(t4d, p2d, "constant", 0)
- >>> print(out.size())
- torch.Size([3, 3, 8, 4])
- >>> t4d = torch.empty(3, 3, 4, 2)
- >>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)
- >>> out = F.pad(t4d, p3d, "constant", 0)
- >>> print(out.size())
- torch.Size([3, 9, 7, 3])
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- torch.nn.functional.pad, (input,), input, pad, mode=mode, value=value)
- if not torch.jit.is_scripting():
- if torch.are_deterministic_algorithms_enabled() and input.is_cuda:
- if mode == 'replicate':
- # Use slow decomp whose backward will be in terms of index_put.
- # importlib is required because the import cannot be top level
- # (cycle) and cannot be nested (TS doesn't support)
- return importlib.import_module('torch._decomp.decompositions')._replication_pad(
- input, pad
- )
- return torch._C._nn.pad(input, pad, mode, value)
- # TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
- pad.__module__ = "torch.nn.functional"
- # distance
- pairwise_distance = _add_docstr(
- torch.pairwise_distance,
- r"""
- pairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) -> Tensor
- See :class:`torch.nn.PairwiseDistance` for details
- """)
- pdist = _add_docstr(
- torch.pdist,
- r"""
- pdist(input, p=2) -> Tensor
- Computes the p-norm distance between every pair of row vectors in the input.
- This is identical to the upper triangular portion, excluding the diagonal, of
- `torch.norm(input[:, None] - input, dim=2, p=p)`. This function will be faster
- if the rows are contiguous.
- If input has shape :math:`N \times M` then the output will have shape
- :math:`\frac{1}{2} N (N - 1)`.
- This function is equivalent to ``scipy.spatial.distance.pdist(input,
- 'minkowski', p=p)`` if :math:`p \in (0, \infty)`. When :math:`p = 0` it is
- equivalent to ``scipy.spatial.distance.pdist(input, 'hamming') * M``.
- When :math:`p = \infty`, the closest scipy function is
- ``scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())``.
- Args:
- input: input tensor of shape :math:`N \times M`.
- p: p value for the p-norm distance to calculate between each vector pair
- :math:`\in [0, \infty]`.
- """,
- )
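- # Hedged sketch checking the documented equivalence with the upper triangle of
- # the full pairwise norm (illustration only; sizes are arbitrary and `F`
- # abbreviates `torch.nn.functional`).
- # >>> x = torch.randn(5, 3)
- # >>> d = F.pdist(x, p=2)  # shape: (5 * 4 / 2,) == (10,)
- # >>> full = torch.norm(x[:, None] - x, dim=2, p=2)
- # >>> i, j = torch.triu_indices(5, 5, offset=1)
- # >>> torch.allclose(d, full[i, j])
- # True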
- cosine_similarity = _add_docstr(
- torch.cosine_similarity,
- r"""
- cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor
- Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable
- to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is
- squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 fewer dimension.
- .. math ::
- \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2, \epsilon) \cdot \max(\Vert x_2 \Vert _2, \epsilon)}
- Supports :ref:`type promotion <type-promotion-doc>`.
- Args:
- x1 (Tensor): First input.
- x2 (Tensor): Second input.
- dim (int, optional): Dimension along which cosine similarity is computed. Default: 1
- eps (float, optional): Small value to avoid division by zero.
- Default: 1e-8
- Example::
- >>> input1 = torch.randn(100, 128)
- >>> input2 = torch.randn(100, 128)
- >>> output = F.cosine_similarity(input1, input2)
- >>> print(output)
- """,
- )
- one_hot = _add_docstr(
- torch._C._nn.one_hot,
- r"""
- one_hot(tensor, num_classes=-1) -> LongTensor
- Takes LongTensor with index values of shape ``(*)`` and returns a tensor
- of shape ``(*, num_classes)`` that has zeros everywhere except where the
- index of last dimension matches the corresponding value of the input tensor,
- in which case it will be 1.
- See also `One-hot on Wikipedia`_ .
- .. _One-hot on Wikipedia:
- https://en.wikipedia.org/wiki/One-hot
- Arguments:
- tensor (LongTensor): class values of any shape.
- num_classes (int): Total number of classes. If set to -1, the number
- of classes will be inferred as one greater than the largest class
- value in the input tensor.
- Returns:
- LongTensor that has one more dimension, with 1 at the indices of the
- last dimension indicated by the input, and 0 everywhere
- else.
- Examples:
- >>> F.one_hot(torch.arange(0, 5) % 3)
- tensor([[1, 0, 0],
- [0, 1, 0],
- [0, 0, 1],
- [1, 0, 0],
- [0, 1, 0]])
- >>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5)
- tensor([[1, 0, 0, 0, 0],
- [0, 1, 0, 0, 0],
- [0, 0, 1, 0, 0],
- [1, 0, 0, 0, 0],
- [0, 1, 0, 0, 0]])
- >>> F.one_hot(torch.arange(0, 6).view(3,2) % 3)
- tensor([[[1, 0, 0],
- [0, 1, 0]],
- [[0, 0, 1],
- [1, 0, 0]],
- [[0, 1, 0],
- [0, 0, 1]]])
- """,
- )
- def triplet_margin_loss(
- anchor: Tensor,
- positive: Tensor,
- negative: Tensor,
- margin: float = 1.0,
- p: float = 2,
- eps: float = 1e-6,
- swap: bool = False,
- size_average: Optional[bool] = None,
- reduce: Optional[bool] = None,
- reduction: str = "mean",
- ) -> Tensor:
- r"""Compute the triplet loss between given input tensors and a margin greater than 0.
- See :class:`~torch.nn.TripletMarginLoss` for details.
- """
- if has_torch_function_variadic(anchor, positive, negative):
- return handle_torch_function(
- triplet_margin_loss,
- (anchor, positive, negative),
- anchor,
- positive,
- negative,
- margin=margin,
- p=p,
- eps=eps,
- swap=swap,
- size_average=size_average,
- reduce=reduce,
- reduction=reduction,
- )
- if size_average is not None or reduce is not None:
- reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
- else:
- reduction_enum = _Reduction.get_enum(reduction)
- if margin <= 0:
- raise ValueError(f"margin must be greater than 0, got {margin}")
- return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction_enum)
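- # Hedged sketch of the margin formula with reduction="none" (illustration
- # only; sizes are arbitrary and `F` abbreviates `torch.nn.functional`).
- # >>> anchor, pos, neg = torch.randn(3, 10, 4).unbind(0)
- # >>> loss = F.triplet_margin_loss(anchor, pos, neg, margin=1.0, reduction="none")
- # >>> d_ap = F.pairwise_distance(anchor, pos)
- # >>> d_an = F.pairwise_distance(anchor, neg)
- # >>> torch.allclose(loss, torch.clamp(1.0 + d_ap - d_an, min=0.0), atol=1e-6)
- # True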
- def triplet_margin_with_distance_loss(
- anchor: Tensor,
- positive: Tensor,
- negative: Tensor,
- *,
- distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
- margin: float = 1.0,
- swap: bool = False,
- reduction: str = "mean"
- ) -> Tensor:
- r"""Compute the triplet margin loss for input tensors using a custom distance function.
- See :class:`~torch.nn.TripletMarginWithDistanceLoss` for details.
- """
- if torch.jit.is_scripting():
- raise NotImplementedError(
- "F.triplet_margin_with_distance_loss does not support JIT scripting: "
- "functions requiring Callables cannot be scripted."
- )
- if has_torch_function_variadic(anchor, positive, negative):
- return handle_torch_function(
- triplet_margin_with_distance_loss,
- (anchor, positive, negative),
- anchor,
- positive,
- negative,
- distance_function=distance_function,
- margin=margin,
- swap=swap,
- reduction=reduction,
- )
- # Check validity of reduction mode
- if reduction not in ("mean", "sum", "none"):
- raise ValueError(f"{reduction} is not a valid value for reduction")
- # Check validity of margin
- if margin <= 0:
- raise ValueError(f"margin must be greater than 0, got {margin}")
- # Check dimensions
- a_dim = anchor.ndim
- p_dim = positive.ndim
- n_dim = negative.ndim
- if not (a_dim == p_dim and p_dim == n_dim):
- raise RuntimeError(
- f"The anchor, positive, and negative tensors are expected to have "
- f"the same number of dimensions, but got: anchor {a_dim}D, "
- f"positive {p_dim}D, and negative {n_dim}D inputs")
- # Calculate loss
- if distance_function is None:
- distance_function = torch.pairwise_distance
- dist_pos = distance_function(anchor, positive)
- dist_neg = distance_function(anchor, negative)
- # The distance swap is described in the paper "Learning shallow
- # convolutional feature descriptors with triplet losses" by V. Balntas, E.
- # Riba et al. If True, and if the positive example is closer to the
- # negative example than the anchor is, swaps the positive example and the
- # anchor in the loss computation.
- if swap:
- dist_swap = distance_function(positive, negative)
- dist_neg = torch.minimum(dist_neg, dist_swap)
- loss = torch.clamp_min(margin + dist_pos - dist_neg, 0)
- # Apply reduction
- if reduction == "sum":
- return torch.sum(loss)
- elif reduction == "mean":
- return torch.mean(loss)
- else: # reduction == "none"
- return loss
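- # Hedged sketch with a custom distance function (illustration only; the
- # cosine-based lambda is an arbitrary choice, not a library default).
- # >>> anchor, pos, neg = torch.randn(3, 10, 4).unbind(0)
- # >>> cos_dist = lambda a, b: 1.0 - F.cosine_similarity(a, b)
- # >>> loss = F.triplet_margin_with_distance_loss(
- # ...     anchor, pos, neg, distance_function=cos_dist, margin=0.5)
- # >>> loss.shape  # default reduction="mean" produces a scalar
- # torch.Size([])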
- def normalize(input: Tensor, p: float = 2.0, dim: int = 1, eps: float = 1e-12, out: Optional[Tensor] = None) -> Tensor:
- r"""Perform :math:`L_p` normalization of inputs over specified dimension.
- For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
- :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as
- .. math::
- v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
- With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.
- Args:
- input: input tensor of any shape
- p (float): the exponent value in the norm formulation. Default: 2
- dim (int or tuple of ints): the dimension to reduce. Default: 1
- eps (float): small value to avoid division by zero. Default: 1e-12
- out (Tensor, optional): the output tensor. If :attr:`out` is used, this
- operation won't be differentiable.
- """
- if has_torch_function_variadic(input, out):
- return handle_torch_function(normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out)
- if out is None:
- denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)
- return input / denom
- else:
- denom = input.norm(p, dim, keepdim=True).clamp_min_(eps).expand_as(input)
- return torch.div(input, denom, out=out)
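- # Hedged sketch: after normalization, each vector along `dim` has (at most)
- # unit p-norm (illustration only; sizes are arbitrary).
- # >>> x = torch.randn(4, 8)
- # >>> y = F.normalize(x, p=2.0, dim=1)
- # >>> torch.allclose(y.norm(p=2, dim=1), torch.ones(4), atol=1e-6)
- # True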
- def assert_int_or_pair(arg: List[int], arg_name: str, message: str) -> None:
- assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name)
- def unfold(
- input: Tensor, kernel_size: BroadcastingList2[int],
- dilation: BroadcastingList2[int] = 1,
- padding: BroadcastingList2[int] = 0,
- stride: BroadcastingList2[int] = 1
- ) -> Tensor:
- r"""Extract sliding local blocks from a batched input tensor.
- .. warning::
- Currently, only 4-D input tensors (batched image-like tensors) are
- supported.
- .. warning::
- More than one element of the unfolded tensor may refer to a single
- memory location. As a result, in-place operations (especially ones that
- are vectorized) may result in incorrect behavior. If you need to write
- to the tensor, please clone it first.
- See :class:`torch.nn.Unfold` for details
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- unfold, (input,), input, kernel_size, dilation=dilation, padding=padding, stride=stride
- )
- return torch._C._nn.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
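- # Hedged shape sketch: unfold returns (N, C * prod(kernel_size), L), where L
- # is the number of sliding blocks (illustration only; sizes are arbitrary).
- # >>> x = torch.randn(1, 3, 10, 12)
- # >>> blocks = F.unfold(x, kernel_size=(4, 5))
- # >>> blocks.shape  # L = (10 - 4 + 1) * (12 - 5 + 1) = 56
- # torch.Size([1, 60, 56])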
- def fold(
- input: Tensor, output_size: BroadcastingList2[int],
- kernel_size: BroadcastingList2[int],
- dilation: BroadcastingList2[int] = 1,
- padding: BroadcastingList2[int] = 0,
- stride: BroadcastingList2[int] = 1
- ) -> Tensor:
- r"""Combine an array of sliding local blocks into a large containing tensor.
- .. warning::
- Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
- See :class:`torch.nn.Fold` for details
- """
- if has_torch_function_unary(input):
- return handle_torch_function(
- fold, (input,), input, output_size, kernel_size, dilation=dilation, padding=padding, stride=stride
- )
- return torch._C._nn.col2im(
- input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride)
- )
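- # Hedged sketch: fold sums overlapping values, so with non-overlapping blocks
- # a fold(unfold(x)) round trip is exact (illustration only; sizes are arbitrary).
- # >>> x = torch.ones(1, 1, 4, 4)
- # >>> blocks = F.unfold(x, kernel_size=(2, 2), stride=2)  # non-overlapping
- # >>> back = F.fold(blocks, output_size=(4, 4), kernel_size=(2, 2), stride=2)
- # >>> torch.equal(back, x)
- # True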
- #
- # multihead attention
- #
- def _in_projection_packed(
- q: Tensor,
- k: Tensor,
- v: Tensor,
- w: Tensor,
- b: Optional[Tensor] = None,
- ) -> List[Tensor]:
- r"""Perform the in-projection step of the attention operation, using packed weights.
- Output is a triple containing projection tensors for query, key and value.
- Args:
- q, k, v: query, key and value tensors to be projected. For self-attention,
- these are typically the same tensor; for encoder-decoder attention,
- k and v are typically the same tensor. (We take advantage of these
- identities for performance if they are present.) Regardless, q, k and v
- must share a common embedding dimension; otherwise their shapes may vary.
- w: projection weights for q, k and v, packed into a single tensor. Weights
- are packed along dimension 0, in q, k, v order.
- b: optional projection biases for q, k and v, packed into a single tensor
- in q, k, v order.
- Shape:
- Inputs:
- - q: :math:`(..., E)` where E is the embedding dimension
- - k: :math:`(..., E)` where E is the embedding dimension
- - v: :math:`(..., E)` where E is the embedding dimension
- - w: :math:`(E * 3, E)` where E is the embedding dimension
- - b: :math:`E * 3` where E is the embedding dimension
- Output:
- - in output list :math:`[q', k', v']`, each output tensor will have the
- same shape as the corresponding input tensor.
- """
- E = q.size(-1)
- if k is v:
- if q is k:
- # self-attention
- proj = linear(q, w, b)
- # reshaping to (3, E) rather than (E, 3) is deliberate for better memory coalescing and to keep the same order as chunk()
- proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
- return proj[0], proj[1], proj[2]
- else:
- # encoder-decoder attention
- w_q, w_kv = w.split([E, E * 2])
- if b is None:
- b_q = b_kv = None
- else:
- b_q, b_kv = b.split([E, E * 2])
- q_proj = linear(q, w_q, b_q)
- kv_proj = linear(k, w_kv, b_kv)
- # reshaping to (2, E) rather than (E, 2) is deliberate for better memory coalescing and to keep the same order as chunk()
- kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
- return (q_proj, kv_proj[0], kv_proj[1])
- else:
- w_q, w_k, w_v = w.chunk(3)
- if b is None:
- b_q = b_k = b_v = None
- else:
- b_q, b_k, b_v = b.chunk(3)
- return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
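- # Hedged sketch: for the self-attention fast path above, the packed projection
- # matches three separate linear projections (illustration only; shapes are
- # arbitrary).
- # >>> E = 8
- # >>> q = torch.randn(2, 5, E)
- # >>> w, b = torch.randn(3 * E, E), torch.randn(3 * E)
- # >>> q1, k1, v1 = _in_projection_packed(q, q, q, w, b)
- # >>> w_q, w_k, w_v = w.chunk(3)
- # >>> b_q, b_k, b_v = b.chunk(3)
- # >>> torch.allclose(k1, linear(q, w_k, b_k))
- # True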
- def _in_projection(
- q: Tensor,
- k: Tensor,
- v: Tensor,
- w_q: Tensor,
- w_k: Tensor,
- w_v: Tensor,
- b_q: Optional[Tensor] = None,
- b_k: Optional[Tensor] = None,
- b_v: Optional[Tensor] = None,
- ) -> Tuple[Tensor, Tensor, Tensor]:
- r"""Perform the in-projection step of the attention operation.
- This is simply a triple of linear projections,
- with shape constraints on the weights which
- ensure embedding dimension uniformity in the projected outputs.
- Output is a triple containing projection tensors for query, key and value.
- Args:
- q, k, v: query, key and value tensors to be projected.
- w_q, w_k, w_v: weights for q, k and v, respectively.
- b_q, b_k, b_v: optional biases for q, k and v, respectively.
- Shape:
- Inputs:
- - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
- number of leading dimensions.
- - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
- number of leading dimensions.
- - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
- number of leading dimensions.
- - w_q: :math:`(Eq, Eq)`
- - w_k: :math:`(Eq, Ek)`
- - w_v: :math:`(Eq, Ev)`
- - b_q: :math:`(Eq)`
- - b_k: :math:`(Eq)`
- - b_v: :math:`(Eq)`
- Output: in output triple :math:`(q', k', v')`,
- - q': :math:`[Qdims..., Eq]`
- - k': :math:`[Kdims..., Eq]`
- - v': :math:`[Vdims..., Eq]`
- """
- Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
- assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
- assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
- assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
- assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
- assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
- assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
- return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
- scaled_dot_product_attention = _add_docstr(
- torch._C._nn.scaled_dot_product_attention, r"""
- scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None) -> Tensor:
- Computes scaled dot product attention on query, key and value tensors, using
- an optional attention mask if passed, and applying dropout if a probability
- greater than 0.0 is specified. The optional scale argument can only be specified as a keyword argument.
- .. code-block:: python
- # Efficient implementation equivalent to the following:
- def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None) -> torch.Tensor:
- L, S = query.size(-2), key.size(-2)
- scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
- attn_bias = torch.zeros(L, S, dtype=query.dtype)
- if is_causal:
- assert attn_mask is None
- temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
- attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
- attn_bias = attn_bias.to(query.dtype)
- if attn_mask is not None:
- if attn_mask.dtype == torch.bool:
- attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
- else:
- attn_bias += attn_mask
- attn_weight = query @ key.transpose(-2, -1) * scale_factor
- attn_weight += attn_bias
- attn_weight = torch.softmax(attn_weight, dim=-1)
- attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
- return attn_weight @ value
- .. warning:: This function is beta and subject to change.
- .. warning::
- This function always applies dropout according to the specified ``dropout_p`` argument.
- To disable dropout during evaluation, be sure to pass a value of ``0.0`` when the module
- that makes the function call is not in training mode.
- For example:
- .. code-block:: python
- class MyModel(nn.Module):
- def __init__(self, p=0.5):
- super().__init__()
- self.p = p
- def forward(self, ...):
- return F.scaled_dot_product_attention(..., dropout_p=(self.p if self.training else 0.0))
- Note:
- There are currently three supported implementations of scaled dot product attention:
- - `FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning`_
- - `Memory-Efficient Attention`_
- - A PyTorch implementation defined in C++ matching the above formulation
- The function may call optimized kernels for improved performance when using the CUDA backend.
- For all other backends, the PyTorch implementation will be used.
- All implementations are enabled by default. Scaled dot product attention attempts to automatically select the
- optimal implementation based on the inputs. In order to provide more fine-grained control over what implementation
- is used, the following functions are provided for enabling and disabling implementations.
- The context manager is the preferred mechanism:
- - :func:`torch.nn.attention.sdpa_kernel`: A context manager used to enable or disable any of the implementations.
- - :func:`torch.backends.cuda.enable_flash_sdp`: Globally enables or disables FlashAttention.
- - :func:`torch.backends.cuda.enable_mem_efficient_sdp`: Globally enables or disables Memory-Efficient Attention.
- - :func:`torch.backends.cuda.enable_math_sdp`: Globally enables or disables the PyTorch C++ implementation.
- Each of the fused kernels has specific input limitations. If the user requires the use of a specific fused implementation,
- disable the PyTorch C++ implementation using :func:`torch.nn.attention.sdpa_kernel`.
- In the event that a fused implementation is not available, a warning will be raised with the
- reasons why the fused implementation cannot run.
- Due to the nature of fusing floating point operations, the output of this function may be different
- depending on what backend kernel is chosen.
- The C++ implementation supports torch.float64 and can be used when higher precision is required.
- For more information please see :doc:`/notes/numerical_accuracy`
- Note:
- {cudnn_reproducibility_note}
- """.format(**reproducibility_notes)
- + r"""
- Args:
- query (Tensor): Query tensor; shape :math:`(N, ..., L, E)`.
- key (Tensor): Key tensor; shape :math:`(N, ..., S, E)`.
- value (Tensor): Value tensor; shape :math:`(N, ..., S, Ev)`.
- attn_mask (optional Tensor): Attention mask; shape must be broadcastable to the shape of attention weights,
- which is :math:`(N,..., L, S)`. Two types of masks are supported.
- A boolean mask where a value of True indicates that the element *should* take part in attention.
- A float mask of the same type as query, key, value that is added to the attention score.
- dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied
- is_causal (bool): If set to true, the attention masking is a lower triangular matrix when the mask is a
- square matrix. The attention masking has the form of the upper left causal bias due to the alignment
- (see :class:`torch.nn.attention.bias.CausalBias`) when the mask is a non-square matrix.
- An error is thrown if both attn_mask and is_causal are set.
- scale (optional float, keyword-only): Scaling factor applied prior to softmax. If None, the default value is set
- to :math:`\frac{1}{\sqrt{E}}`.
- Returns:
- output (Tensor): Attention output; shape :math:`(N, ..., L, Ev)`.
- Shape legend:
- - :math:`N: \text{Batch size}`
- - :math:`...: \text{Any number of other batch dimensions (optional)}`
- - :math:`S: \text{Source sequence length}`
- - :math:`L: \text{Target sequence length}`
- - :math:`E: \text{Embedding dimension of the query and key}`
- - :math:`Ev: \text{Embedding dimension of the value}`
- Examples:
- >>> # Optionally use the context manager to ensure one of the fused kernels is run
- >>> query = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
- >>> key = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
- >>> value = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda")
- >>> with torch.backends.cuda.sdp_kernel(enable_math=False):
- >>> F.scaled_dot_product_attention(query,key,value)
- .. _FlashAttention-2\: Faster Attention with Better Parallelism and Work Partitioning:
- https://arxiv.org/abs/2307.08691
- .. _Memory-Efficient Attention:
- https://github.com/facebookresearch/xformers
- """)
- def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
- key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
- # Verifies the expected shape for `query`, `key`, `value`, `key_padding_mask` and `attn_mask`
- # and returns whether the input is batched.
- # Raises an error if `query` is not a 2-D (unbatched) or 3-D (batched) tensor.
- # Shape check.
- if query.dim() == 3:
- # Batched Inputs
- is_batched = True
- assert key.dim() == 3 and value.dim() == 3, \
- ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
- f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
- if key_padding_mask is not None:
- assert key_padding_mask.dim() == 2, \
- ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
- f" but found {key_padding_mask.dim()}-D tensor instead")
- if attn_mask is not None:
- assert attn_mask.dim() in (2, 3), \
- ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
- f" but found {attn_mask.dim()}-D tensor instead")
- elif query.dim() == 2:
- # Unbatched Inputs
- is_batched = False
- assert key.dim() == 2 and value.dim() == 2, \
- ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
- f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
- if key_padding_mask is not None:
- assert key_padding_mask.dim() == 1, \
- ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
- f" but found {key_padding_mask.dim()}-D tensor instead")
- if attn_mask is not None:
- assert attn_mask.dim() in (2, 3), \
- ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
- f" but found {attn_mask.dim()}-D tensor instead")
- if attn_mask.dim() == 3:
- expected_shape = (num_heads, query.shape[0], key.shape[0])
- assert attn_mask.shape == expected_shape, \
- (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
- else:
- raise AssertionError(
- f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
- return is_batched
- def _canonical_mask(
- mask: Optional[Tensor],
- mask_name: str,
- other_type: Optional[DType],
- other_name: str,
- target_type: DType,
- check_other: bool = True,
- ) -> Optional[Tensor]:
- if mask is not None:
- _mask_dtype = mask.dtype
- _mask_is_float = torch.is_floating_point(mask)
- if _mask_dtype != torch.bool and not _mask_is_float:
- raise AssertionError(
- f"only bool and floating types of {mask_name} are supported")
- if check_other and other_type is not None:
- if _mask_dtype != other_type:
- warnings.warn(
- f"Support for mismatched {mask_name} and {other_name} "
- "is deprecated. Use same type for both instead."
- )
- if not _mask_is_float:
- mask = (
- torch.zeros_like(mask, dtype=target_type)
- .masked_fill_(mask, float("-inf"))
- )
- return mask
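- # Hedged sketch: a boolean mask is canonicalized to a float mask with -inf at
- # masked (True) positions, mirroring internal usage (illustration only).
- # >>> m = torch.tensor([[False, True]])
- # >>> _canonical_mask(m, "attn_mask", None, "", torch.float32, check_other=False)
- # tensor([[0., -inf]])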
- def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
- if input is None:
- return None
- elif isinstance(input, torch.Tensor):
- return input.dtype
- raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
- def multi_head_attention_forward(
- query: Tensor,
- key: Tensor,
- value: Tensor,
- embed_dim_to_check: int,
- num_heads: int,
- in_proj_weight: Optional[Tensor],
- in_proj_bias: Optional[Tensor],
- bias_k: Optional[Tensor],
- bias_v: Optional[Tensor],
- add_zero_attn: bool,
- dropout_p: float,
- out_proj_weight: Tensor,
- out_proj_bias: Optional[Tensor],
- training: bool = True,
- key_padding_mask: Optional[Tensor] = None,
- need_weights: bool = True,
- attn_mask: Optional[Tensor] = None,
- use_separate_proj_weight: bool = False,
- q_proj_weight: Optional[Tensor] = None,
- k_proj_weight: Optional[Tensor] = None,
- v_proj_weight: Optional[Tensor] = None,
- static_k: Optional[Tensor] = None,
- static_v: Optional[Tensor] = None,
- average_attn_weights: bool = True,
- is_causal: bool = False,
- ) -> Tuple[Tensor, Optional[Tensor]]:
- r"""Forward method for MultiHeadAttention.
- See :class:`torch.nn.MultiheadAttention` for details.
- Args:
- query, key, value: map a query and a set of key-value pairs to an output.
- See "Attention Is All You Need" for more details.
- embed_dim_to_check: total dimension of the model.
- num_heads: parallel attention heads.
- in_proj_weight, in_proj_bias: input projection weight and bias.
- bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
- add_zero_attn: add a new batch of zeros to the key and
- value sequences at dim=1.
- dropout_p: probability of an element to be zeroed.
- out_proj_weight, out_proj_bias: the output projection weight and bias.
- training: apply dropout if ``True``.
- key_padding_mask: if provided, specified padding elements in the key will
- be ignored by the attention. This is a binary mask. When the value is True,
- the corresponding value on the attention layer will be filled with -inf.
- need_weights: output attn_output_weights.
- Default: `True`
- Note: `need_weights` defaults to `True`, but should be set to `False`
- for best performance when attention weights are not needed.
- *Setting need_weights to `True`
- leads to a significant performance degradation.*
- attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
- the batches while a 3D mask allows specifying a different mask for the entries of each batch.
- is_causal: If specified, applies a causal mask as attention mask, and ignores
- attn_mask for computing scaled dot product attention.
- Default: ``False``.
- .. warning::
- is_causal provides a hint that attn_mask is the
- causal mask. Providing incorrect hints can result in
- incorrect execution, including forward and backward
- compatibility.
- use_separate_proj_weight: the function accepts the projection weights for query, key,
- and value in different forms. If ``False``, in_proj_weight will be used, which is
- a combination of q_proj_weight, k_proj_weight, v_proj_weight.
- q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
- static_k, static_v: static key and value used for attention operators.
- average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
- Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
- when ``need_weights=True``. Default: True
- Shape:
- Inputs:
- - query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
- the embedding dimension.
- - key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
- the embedding dimension.
- - value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
- the embedding dimension.
- - key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
- If a FloatTensor is provided, it will be directly added to the value.
- If a BoolTensor is provided, the positions with the
- value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
- S is the source sequence length. attn_mask ensures that position i is allowed to attend to the unmasked
- positions. If a BoolTensor is provided, positions with ``True``
- are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
- is provided, it will be added to the attention weight.
- - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
- N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
- N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- Outputs:
- - attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
- E is the embedding dimension.
- - attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
- attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
- :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
- :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
- head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
- """
- tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
- if has_torch_function(tens_ops):
- return handle_torch_function(
- multi_head_attention_forward,
- tens_ops,
- query,
- key,
- value,
- embed_dim_to_check,
- num_heads,
- in_proj_weight,
- in_proj_bias,
- bias_k,
- bias_v,
- add_zero_attn,
- dropout_p,
- out_proj_weight,
- out_proj_bias,
- training=training,
- key_padding_mask=key_padding_mask,
- need_weights=need_weights,
- attn_mask=attn_mask,
- is_causal=is_causal,
- use_separate_proj_weight=use_separate_proj_weight,
- q_proj_weight=q_proj_weight,
- k_proj_weight=k_proj_weight,
- v_proj_weight=v_proj_weight,
- static_k=static_k,
- static_v=static_v,
- average_attn_weights=average_attn_weights,
- )
- is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
- # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
- # is batched, run the computation, and squeeze the batch dimension before returning,
- # so that the output doesn't carry this temporary batch dimension.
- if not is_batched:
- # unsqueeze if the input is unbatched
- query = query.unsqueeze(1)
- key = key.unsqueeze(1)
- value = value.unsqueeze(1)
- if key_padding_mask is not None:
- key_padding_mask = key_padding_mask.unsqueeze(0)
- # set up shape vars
- tgt_len, bsz, embed_dim = query.shape
- src_len, _, _ = key.shape
- key_padding_mask = _canonical_mask(
- mask=key_padding_mask,
- mask_name="key_padding_mask",
- other_type=_none_or_dtype(attn_mask),
- other_name="attn_mask",
- target_type=query.dtype
- )
- if is_causal and attn_mask is None:
- raise RuntimeError(
- "Need attn_mask if specifying the is_causal hint. "
- "You may use the Transformer module method "
- "`generate_square_subsequent_mask` to create this mask."
- )
- if is_causal and key_padding_mask is None and not need_weights:
- # when we have a kpm or need weights, we need attn_mask
- # Otherwise, we pass the is_causal hint through as the
- # is_causal indicator to SDPA.
- attn_mask = None
- else:
- attn_mask = _canonical_mask(
- mask=attn_mask,
- mask_name="attn_mask",
- other_type=None,
- other_name="",
- target_type=query.dtype,
- check_other=False,
- )
- if key_padding_mask is not None:
- # We have the attn_mask, and use that to merge kpm into it.
- # Turn off use of is_causal hint, as the merged mask is no
- # longer causal.
- is_causal = False
- assert embed_dim == embed_dim_to_check, \
- f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
- if isinstance(embed_dim, torch.Tensor):
- # embed_dim can be a tensor when JIT tracing
- head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
- else:
- head_dim = embed_dim // num_heads
- assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
- if use_separate_proj_weight:
- # allow MHA to have different embedding dimensions when separate projection weights are used
- assert key.shape[:2] == value.shape[:2], \
- f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
- else:
- assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
- #
- # compute in-projection
- #
- if not use_separate_proj_weight:
- assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
- q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
- else:
- assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
- assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
- assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
- if in_proj_bias is None:
- b_q = b_k = b_v = None
- else:
- b_q, b_k, b_v = in_proj_bias.chunk(3)
- q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
- # prep attention mask
- if attn_mask is not None:
- # ensure attn_mask's dim is 3
- if attn_mask.dim() == 2:
- correct_2d_size = (tgt_len, src_len)
- if attn_mask.shape != correct_2d_size:
- raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
- attn_mask = attn_mask.unsqueeze(0)
- elif attn_mask.dim() == 3:
- correct_3d_size = (bsz * num_heads, tgt_len, src_len)
- if attn_mask.shape != correct_3d_size:
- raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
- else:
- raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
- # add bias along batch dimension (currently second)
- if bias_k is not None and bias_v is not None:
- assert static_k is None, "bias cannot be added to static key."
- assert static_v is None, "bias cannot be added to static value."
- k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
- v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
- if attn_mask is not None:
- attn_mask = pad(attn_mask, (0, 1))
- if key_padding_mask is not None:
- key_padding_mask = pad(key_padding_mask, (0, 1))
- else:
- assert bias_k is None
- assert bias_v is None
- #
- # reshape q, k, v for multihead attention and make them batch first
- #
- q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
- if static_k is None:
- k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
- else:
- # TODO finish disentangling control flow so we don't do in-projections when statics are passed
- assert static_k.size(0) == bsz * num_heads, \
- f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
- assert static_k.size(2) == head_dim, \
- f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
- k = static_k
- if static_v is None:
- v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
- else:
- # TODO finish disentangling control flow so we don't do in-projections when statics are passed
- assert static_v.size(0) == bsz * num_heads, \
- f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
- assert static_v.size(2) == head_dim, \
- f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
- v = static_v
- # add zero attention along batch dimension (now first)
- if add_zero_attn:
- zero_attn_shape = (bsz * num_heads, 1, head_dim)
- k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
- v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
- if attn_mask is not None:
- attn_mask = pad(attn_mask, (0, 1))
- if key_padding_mask is not None:
- key_padding_mask = pad(key_padding_mask, (0, 1))
- # update source sequence length after adjustments
- src_len = k.size(1)
- # merge key padding and attention masks
- if key_padding_mask is not None:
- assert key_padding_mask.shape == (bsz, src_len), \
- f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
- key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
- expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
- if attn_mask is None:
- attn_mask = key_padding_mask
- else:
- attn_mask = attn_mask + key_padding_mask
- # adjust dropout probability
- if not training:
- dropout_p = 0.0
- #
- # (deep breath) calculate attention and out projection
- #
- if need_weights:
- B, Nt, E = q.shape
- q_scaled = q * math.sqrt(1.0 / float(E))
- assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
- if attn_mask is not None:
- attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
- else:
- attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
- attn_output_weights = softmax(attn_output_weights, dim=-1)
- if dropout_p > 0.0:
- attn_output_weights = dropout(attn_output_weights, p=dropout_p)
- attn_output = torch.bmm(attn_output_weights, v)
- attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
- attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
- attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
- # optionally average attention weights over heads
- attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
- if average_attn_weights:
- attn_output_weights = attn_output_weights.mean(dim=1)
- if not is_batched:
- # squeeze the output if input was unbatched
- attn_output = attn_output.squeeze(1)
- attn_output_weights = attn_output_weights.squeeze(0)
- return attn_output, attn_output_weights
- else:
- # attn_mask can be either (L,S) or (N*num_heads, L, S)
- # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
- # in order to match the input for SDPA of (N, num_heads, L, S)
- if attn_mask is not None:
- if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
- attn_mask = attn_mask.unsqueeze(0)
- else:
- attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
- q = q.view(bsz, num_heads, tgt_len, head_dim)
- k = k.view(bsz, num_heads, src_len, head_dim)
- v = v.view(bsz, num_heads, src_len, head_dim)
- attn_output = scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
- attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
- attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
- attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
- if not is_batched:
- # squeeze the output if input was unbatched
- attn_output = attn_output.squeeze(1)
- return attn_output, None
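- # Hedged usage sketch for the functional MHA above (sequence-first layout;
- # the sizes and random weights below are arbitrary assumptions, not defaults).
- # >>> L, N, E, H = 4, 2, 8, 2
- # >>> q = torch.randn(L, N, E)
- # >>> in_w, in_b = torch.randn(3 * E, E), torch.zeros(3 * E)
- # >>> out_w, out_b = torch.randn(E, E), torch.zeros(E)
- # >>> out, weights = multi_head_attention_forward(
- # ...     q, q, q, E, H, in_w, in_b, None, None, False, 0.0, out_w, out_b,
- # ...     training=False, need_weights=True, average_attn_weights=True)
- # >>> out.shape, weights.shape
- # (torch.Size([4, 2, 8]), torch.Size([2, 4, 4]))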
|