_VariableFunctions.pyi 1.1 MB

(14,308 lines; file contents not shown)
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
# @generated from torch/_C/_VariableFunctions.pyi.in
# mypy: disable-error-code="type-arg"
# mypy: allow-untyped-defs
import builtins
from typing import (
    Any,
    Callable,
    ContextManager,
    Iterator,
    List,
    Literal,
    NamedTuple,
    Optional,
    overload,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)

import torch
from torch import contiguous_format, Generator, inf, memory_format, strided, SymInt, Tensor
from torch.types import (
    _bool,
    _complex,
    _device,
    _dtype,
    _float,
    _int,
    _layout,
    _qscheme,
    _size,
    Device,
    Number,
)
from torch._prims_common import DeviceLikeType

@overload
def __and__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
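# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. The dunder
# signatures above type the functional forms behind the bitwise/shift
# operators on integer and boolean Tensors; in user code you normally reach
# them through the operators themselves (or torch.bitwise_and and friends).
def _example_bitwise_ops() -> None:  # hypothetical helper, illustration only
    a = torch.tensor([0b1100], dtype=torch.int32)
    b = torch.tensor([0b1010], dtype=torch.int32)
    _ = a & b   # tensor([8]),  the __and__(input, other) form
    _ = a | b   # tensor([14]), the __or__(input, other) form
    _ = a ^ b   # tensor([6]),  the __xor__(input, other) form
    _ = a << 1  # tensor([24]), the __lshift__(input, other) form
    _ = a >> 2  # tensor([3]),  the __rshift__(input, other) form
# ---------------------------------------------------------------------------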
def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
@overload
def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
@overload
def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
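# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. This is
# the in-place kernel behind GradScaler.unscale_(): it multiplies every
# gradient by inv_scale and flips found_inf to 1.0 if any gradient contains
# inf/NaN. On older builds it may require CUDA tensors.
def _example_amp_unscale() -> None:  # hypothetical helper, illustration only
    grads = [torch.tensor([2.0, 4.0]), torch.tensor([float("inf")])]
    found_inf = torch.tensor(0.0)
    inv_scale = torch.tensor(0.5)  # reciprocal of the current loss scale
    torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
    # grads[0] is now tensor([1., 2.]); found_inf is 1.0 because grads[1]
    # contains a non-finite value.
# ---------------------------------------------------------------------------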
@overload
def _assert_async(input: Tensor) -> None:
    r"""
    _assert_async(tensor) -> void

    Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
    this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
    CUDA tensors, we DO NOT synchronize and you may only find out the assertion
    failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
    testing invariants in CUDA tensors without giving up performance. This function
    is NOT intended to be used for regular error checking, as it will trash your CUDA
    context if the assert fails (forcing you to restart your PyTorch process).

    Args:
        tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
            elements (including False for boolean tensors) cause an assertion failure
            to be raised.
    """
    ...
@overload
def _assert_async(input: Tensor, assert_msg: str) -> None:
    r"""
    _assert_async(tensor, assert_msg) -> void

    Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
    this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
    CUDA tensors, we DO NOT synchronize and you may only find out the assertion
    failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
    testing invariants in CUDA tensors without giving up performance. This function
    is NOT intended to be used for regular error checking, as it will trash your CUDA
    context if the assert fails (forcing you to restart your PyTorch process).

    Args:
        tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
            elements (including False for boolean tensors) cause an assertion failure
            to be raised.
    """
    ...
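# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. A passing
# assertion is a no-op; on CUDA a failing one poisons the CUDA context rather
# than raising immediately, so reserve this for invariant checks.
def _example_assert_async() -> None:  # hypothetical helper, illustration only
    flag = torch.tensor(1)  # one-element, nonzero: assertion passes
    torch._assert_async(flag)
    torch._assert_async(flag, "flag must be nonzero")  # overload with message
# ---------------------------------------------------------------------------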
def _assert_scalar(self: Union[Number, _complex], assert_msg: str) -> None: ...
def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ...
def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ...
def _chunk_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int, num_chunks: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _coalesce(input: Tensor) -> Tensor: ...
def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _conj(input: Tensor) -> Tensor: ...
def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _conj_physical(input: Tensor) -> Tensor: ...
def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ...
def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
def _cslt_compress(input: Tensor) -> Tensor: ...
def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False, alg_id: _int = 0) -> Tensor: ...
def _cslt_sparse_mm_search(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> _int: ...
@overload
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
@overload
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
@overload
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
def _cufft_clear_plan_cache(device_index: _int) -> None: ...
def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _debug_has_internal_overlap(input: Tensor) -> _int: ...
def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
def _disable_functionalization(): ...
@overload
def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _enable_functionalization(*, reapply_views: _bool = False): ...
def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ...
def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ...
def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ...
def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_abs(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.abs` to each Tensor of the input list.
    """
    ...
def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_abs_(self: List[Tensor]) -> None

    Apply :func:`torch.abs` to each Tensor of the input list.
    """
    ...
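# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. The
# _foreach_* family applies one op across a whole list of tensors in a few
# fused kernel launches instead of a Python loop; out-of-place variants return
# a tuple of new tensors, in-place variants (trailing underscore) mutate the
# inputs and return None.
def _example_foreach_abs() -> None:  # hypothetical helper, illustration only
    xs = [torch.tensor([-1.0, 2.0]), torch.tensor([3.0, -4.0])]
    ys = torch._foreach_abs(xs)  # (tensor([1., 2.]), tensor([3., 4.]))
    torch._foreach_abs_(xs)      # xs now hold the absolute values
# ---------------------------------------------------------------------------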
def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_acos(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.acos` to each Tensor of the input list.
    """
    ...
def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_acos_(self: List[Tensor]) -> None

    Apply :func:`torch.acos` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
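# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. The four
# _foreach_add overloads broadcast a single scalar, zip a list of scalars, add
# one tensor to every element, or zip a second tensor list (with an optional
# alpha multiplier, as in torch.add).
def _example_foreach_add() -> None:  # hypothetical helper, illustration only
    params = [torch.zeros(2), torch.zeros(3)]
    grads = [torch.ones(2), torch.ones(3)]
    torch._foreach_add_(params, grads, alpha=-0.1)  # fused SGD-style update
    out = torch._foreach_add(params, 1.0)           # one scalar for every tensor
    out = torch._foreach_add(params, [1.0, 2.0])    # one scalar per tensor
# ---------------------------------------------------------------------------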
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
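# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub.
# _foreach_addcdiv_/_foreach_addcmul_ compute self += value * tensor1 / tensor2
# (resp. * tensor2) element-wise across the lists; this is the fused kernel
# shape used by Adam-style optimizer steps.
def _example_foreach_addcdiv() -> None:  # hypothetical helper, illustration only
    params = [torch.zeros(2)]
    exp_avg = [torch.ones(2)]
    denom = [torch.full((2,), 2.0)]
    torch._foreach_addcdiv_(params, exp_avg, denom, value=-0.01)
    # params[0] is now tensor([-0.0050, -0.0050])
# ---------------------------------------------------------------------------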
def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_asin(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.asin` to each Tensor of the input list.
    """
    ...
def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_asin_(self: List[Tensor]) -> None

    Apply :func:`torch.asin` to each Tensor of the input list.
    """
    ...
def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_atan(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.atan` to each Tensor of the input list.
    """
    ...
def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_atan_(self: List[Tensor]) -> None

    Apply :func:`torch.atan` to each Tensor of the input list.
    """
    ...
def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_ceil(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.ceil` to each Tensor of the input list.
    """
    ...
def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_ceil_(self: List[Tensor]) -> None

    Apply :func:`torch.ceil` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ...
def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_cos(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.cos` to each Tensor of the input list.
    """
    ...
def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_cos_(self: List[Tensor]) -> None

    Apply :func:`torch.cos` to each Tensor of the input list.
    """
    ...
def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_cosh(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.cosh` to each Tensor of the input list.
    """
    ...
def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_cosh_(self: List[Tensor]) -> None

    Apply :func:`torch.cosh` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_erf(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.erf` to each Tensor of the input list.
    """
    ...
def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_erf_(self: List[Tensor]) -> None

    Apply :func:`torch.erf` to each Tensor of the input list.
    """
    ...
def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_erfc(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.erfc` to each Tensor of the input list.
    """
    ...
def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_erfc_(self: List[Tensor]) -> None

    Apply :func:`torch.erfc` to each Tensor of the input list.
    """
    ...
def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_exp(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.exp` to each Tensor of the input list.
    """
    ...
def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_exp_(self: List[Tensor]) -> None

    Apply :func:`torch.exp` to each Tensor of the input list.
    """
    ...
def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_expm1(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.expm1` to each Tensor of the input list.
    """
    ...
def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_expm1_(self: List[Tensor]) -> None

    Apply :func:`torch.expm1` to each Tensor of the input list.
    """
    ...
def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_floor(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.floor` to each Tensor of the input list.
    """
    ...
def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_floor_(self: List[Tensor]) -> None

    Apply :func:`torch.floor` to each Tensor of the input list.
    """
    ...
def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_frac(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.frac` to each Tensor of the input list.
    """
    ...
def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_frac_(self: List[Tensor]) -> None

    Apply :func:`torch.frac` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ...
@overload
def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
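# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub.
# _foreach_lerp_(self, tensors1, weight) performs self += weight * (tensors1
# - self) per tensor; with a small scalar weight this is the classic EMA
# update used for model weight averaging.
def _example_foreach_lerp() -> None:  # hypothetical helper, illustration only
    ema = [torch.zeros(2)]
    live = [torch.ones(2)]
    torch._foreach_lerp_(ema, live, 0.01)  # ema[0] is now tensor([0.0100, 0.0100])
# ---------------------------------------------------------------------------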
def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_lgamma(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.lgamma` to each Tensor of the input list.
    """
    ...
def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_lgamma_(self: List[Tensor]) -> None

    Apply :func:`torch.lgamma` to each Tensor of the input list.
    """
    ...
def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log` to each Tensor of the input list.
    """
    ...
def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log10(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log10` to each Tensor of the input list.
    """
    ...
def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log10_(self: List[Tensor]) -> None

    Apply :func:`torch.log10` to each Tensor of the input list.
    """
    ...
def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log1p(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log1p` to each Tensor of the input list.
    """
    ...
def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log1p_(self: List[Tensor]) -> None

    Apply :func:`torch.log1p` to each Tensor of the input list.
    """
    ...
def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_log2(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.log2` to each Tensor of the input list.
    """
    ...
def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log2_(self: List[Tensor]) -> None

    Apply :func:`torch.log2` to each Tensor of the input list.
    """
    ...
def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_log_(self: List[Tensor]) -> None

    Apply :func:`torch.log` to each Tensor of the input list.
    """
    ...
def _foreach_max(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_neg(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.neg` to each Tensor of the input list.
    """
    ...
def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_neg_(self: List[Tensor]) -> None

    Apply :func:`torch.neg` to each Tensor of the input list.
    """
    ...
def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2, dtype: Optional[_dtype] = None) -> Tuple[Tensor, ...]: ...
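# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub.
# _foreach_norm returns one scalar tensor per input tensor; reducing those is
# how torch.nn.utils.clip_grad_norm_ computes the total gradient norm in a
# handful of kernels.
def _example_foreach_norm() -> None:  # hypothetical helper, illustration only
    grads = [torch.ones(4), torch.full((2,), 3.0)]
    per_tensor = torch._foreach_norm(grads, ord=2)  # (tensor(2.), tensor(4.2426))
    total = torch.linalg.vector_norm(torch.stack(per_tensor))
# ---------------------------------------------------------------------------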
@overload
def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ...
@overload
def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_reciprocal(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.reciprocal` to each Tensor of the input list.
    """
    ...
def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_reciprocal_(self: List[Tensor]) -> None

    Apply :func:`torch.reciprocal` to each Tensor of the input list.
    """
    ...
def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_round(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.round` to each Tensor of the input list.
    """
    ...
def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_round_(self: List[Tensor]) -> None

    Apply :func:`torch.round` to each Tensor of the input list.
    """
    ...
def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sigmoid(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sigmoid` to each Tensor of the input list.
    """
    ...
def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sigmoid_(self: List[Tensor]) -> None

    Apply :func:`torch.sigmoid` to each Tensor of the input list.
    """
    ...
def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sin(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sin` to each Tensor of the input list.
    """
    ...
def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sin_(self: List[Tensor]) -> None

    Apply :func:`torch.sin` to each Tensor of the input list.
    """
    ...
def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sinh(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sinh` to each Tensor of the input list.
    """
    ...
def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sinh_(self: List[Tensor]) -> None

    Apply :func:`torch.sinh` to each Tensor of the input list.
    """
    ...
def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_sqrt(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.sqrt` to each Tensor of the input list.
    """
    ...
def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_sqrt_(self: List[Tensor]) -> None

    Apply :func:`torch.sqrt` to each Tensor of the input list.
    """
    ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_tan(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.tan` to each Tensor of the input list.
    """
    ...
def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_tan_(self: List[Tensor]) -> None

    Apply :func:`torch.tan` to each Tensor of the input list.
    """
    ...
def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_tanh(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.tanh` to each Tensor of the input list.
    """
    ...
def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_tanh_(self: List[Tensor]) -> None

    Apply :func:`torch.tanh` to each Tensor of the input list.
    """
    ...
def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    _foreach_trunc(self: List[Tensor]) -> List[Tensor]

    Apply :func:`torch.trunc` to each Tensor of the input list.
    """
    ...
def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_trunc_(self: List[Tensor]) -> None

    Apply :func:`torch.trunc` to each Tensor of the input list.
    """
    ...
def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
    r"""
    _foreach_zero_(self: List[Tensor]) -> None

    Apply :func:`torch.zero` to each Tensor of the input list.
    """
    ...
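# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. Zeroing a
# whole list of gradients in one fused call is the kind of path foreach-capable
# optimizers use for optimizer.zero_grad(set_to_none=False).
def _example_foreach_zero() -> None:  # hypothetical helper, illustration only
    grads = [torch.ones(2), torch.ones(3)]
    torch._foreach_zero_(grads)  # every tensor is zeroed in place
# ---------------------------------------------------------------------------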
def _from_functional_tensor(t: Tensor) -> Tensor: ...
def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ...
def _functional_assert_scalar(self: Union[Number, _complex], assert_msg: str, dep_token: Tensor) -> Tensor: ...
def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
def _functionalize_apply_view_metas(tensor: Tensor, base: Tensor) -> Tensor: ...
def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ...
def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ...
def _functionalize_commit_update(t: Tensor) -> None: ...
def _functionalize_has_metadata_mutation(tensor: Tensor) -> _bool: ...
def _functionalize_is_symbolic(tensor: Tensor) -> _bool: ...
def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ...
def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ...
def _functionalize_sync(t: Tensor) -> None: ...
def _functionalize_was_inductor_storage_resized(t: Tensor) -> _bool: ...
def _functionalize_was_storage_changed(tensor: Tensor) -> _bool: ...
def _fused_adagrad_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], state_sums: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, lr_decay: _float, weight_decay: _float, eps: _float, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
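# ---------------------------------------------------------------------------
# Editor's note: usage sketch only, not part of the generated stub. These are
# the single-kernel optimizer steps selected by torch.optim.Adam/AdamW with
# fused=True; calling them directly is rarely needed and, depending on the
# build, may require all tensors on a CUDA device. Sketch under those
# assumptions:
def _example_fused_adam() -> None:  # hypothetical helper, illustration only
    dev = "cuda"
    params = [torch.zeros(2, device=dev)]
    grads = [torch.ones(2, device=dev)]
    exp_avgs = [torch.zeros(2, device=dev)]
    exp_avg_sqs = [torch.zeros(2, device=dev)]
    state_steps = [torch.tensor(1.0, device=dev)]
    torch._fused_adam_(
        params, grads, exp_avgs, exp_avg_sqs, [], state_steps,
        lr=1e-3, beta1=0.9, beta2=0.999, weight_decay=0.0, eps=1e-8,
        amsgrad=False, maximize=False,  # empty max_exp_avg_sqs since amsgrad=False
    )
# ---------------------------------------------------------------------------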
def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ...
def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ...
@overload
def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: Tensor, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
@overload
def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: _float, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tuple[Tensor, ...]: ...
def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ...
def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _is_all_true(input: Tensor) -> Tensor: ...
def _is_any_true(input: Tensor) -> Tensor: ...
def _is_functional_tensor(t: Tensor) -> _bool: ...
def _is_zerotensor(input: Tensor) -> _bool: ...
def _lazy_clone(input: Tensor) -> Tensor: ...
def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ...
def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ...
def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ...
def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ...
def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ...
def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ...
def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ...
def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ...
def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ...
def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
@overload
def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ...
def _neg_view(input: Tensor) -> Tensor: ...
def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _nested_compute_contiguous_strides_offsets(nested_size: Tensor) -> Tuple[Tensor, Tensor]: ...
def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ...
def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ...
def _nested_get_jagged_dummy(any: Tensor) -> Tensor: ...
def _nested_get_lengths(input: Tensor) -> Tensor: ...
def _nested_get_offsets(input: Tensor) -> Tensor: ...
def _nested_get_ragged_idx(input: Tensor) -> _int: ...
def _nested_get_values(input: Tensor) -> Tensor: ...
def _nested_get_values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ...
def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ...
def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ...
def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ...
def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ...
def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
  778. def _nested_view_from_jagged(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1) -> Tensor: ...
  779. def _nested_view_from_jagged_copy(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ...
  780. def _nnpack_available() -> _bool: ...
  781. def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
  782. def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
  783. def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ...
  784. def _pin_memory(input: Tensor, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ...
  785. def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ...
  786. def _print(s: str) -> None: ...
  787. def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ...
  788. def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
  789. def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ...
  790. def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
  791. def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ...
  792. def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
  793. def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
  794. def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
  795. def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ...
  796. def _scaled_dot_product_cudnn_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_cudnn_attention: ...
  797. def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ...
  798. def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ...
  799. def _scaled_dot_product_flash_attention_for_cpu(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, *, attn_mask: Optional[Tensor] = None, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention_for_cpu: ...
  800. def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ...
  801. def _shape_as_tensor(input: Tensor) -> Tensor: ...
  802. def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
  803. def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
  804. def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
  805. def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
  806. def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
  807. def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ...
  808. def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ...
  809. def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ...
  810. def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
  811. def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
  812. def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
  813. def _sparse_semi_structured_addmm(input: Tensor, mat1: Tensor, mat1_meta: Tensor, mat2: Tensor, *, alpha: Union[Number, _complex] = 1, beta: Union[Number, _complex] = 1, out_dtype: Optional[_dtype] = None) -> Tensor: ...
  814. def _sparse_semi_structured_apply(input: Tensor, thread_masks: Tensor) -> Tuple[Tensor, Tensor]: ...
  815. def _sparse_semi_structured_apply_dense(input: Tensor, thread_masks: Tensor) -> Tensor: ...
  816. def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None, out_dtype: Optional[_dtype] = None) -> Tensor: ...
  817. def _sparse_semi_structured_mm(mat1: Tensor, mat1_meta: Tensor, mat2: Tensor, *, out_dtype: Optional[_dtype] = None) -> Tensor: ...
  818. def _sparse_semi_structured_tile(input: Tensor, algorithm: str = "", use_cutlass: _bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
  819. def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
  820. def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
  821. @overload
  822. def _sparse_sum(input: Tensor) -> Tensor: ...
  823. @overload
  824. def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
  825. @overload
  826. def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
  827. @overload
  828. def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
  829. def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
  830. def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
  831. def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
  832. def _sync(t: Tensor) -> None: ...
  833. @overload
  834. def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ...
  835. @overload
  836. def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ...
  837. def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ...
  838. def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
  839. def _test_check_tensor(input: Tensor) -> Tensor: ...
  840. def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ...
  841. def _test_parallel_materialize(input: Tensor, num_parallel: _int, skip_first: _bool = False) -> Tensor: ...
  842. def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ...
  843. def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
  844. def _to_functional_tensor(t: Tensor) -> Tensor: ...
  845. def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ...
  846. def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ...
  847. def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ...
  848. def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ...
  849. def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ...
  850. def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ...
  851. def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ...
  852. def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
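# A minimal usage sketch: the private _unique/_unique2 helpers above appear to
# back the public torch.unique, so the public API is shown here instead.
# Values are illustrative.
import torch

values, counts = torch.unique(torch.tensor([1, 3, 2, 3]), return_counts=True)
print(values)  # tensor([1, 2, 3])
print(counts)  # tensor([1, 1, 2])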
def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ...
def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ...
def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
@overload
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ...
@overload
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
def _use_cudnn_rnn_flatten_weight() -> _bool: ...
def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ...
def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ...
def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ...
def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ...
def _weight_int8pack_mm(input: Tensor, mat2: Tensor, scales: Tensor) -> Tensor: ...
def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ...
def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ...
def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    abs(input, *, out=None) -> Tensor

    Computes the absolute value of each element in :attr:`input`.

    .. math::
        \text{out}_{i} = |\text{input}_{i}|

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.abs(torch.tensor([-1, -2, 3]))
        tensor([1, 2, 3])
    """
    ...
def abs_(input: Tensor) -> Tensor: ...
def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    absolute(input, *, out=None) -> Tensor

    Alias for :func:`torch.abs`
    """
    ...
def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    acos(input, *, out=None) -> Tensor

    Computes the inverse cosine of each element in :attr:`input`.

    .. math::
        \text{out}_{i} = \cos^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.3348, -0.5889,  0.2005, -0.1584])
        >>> torch.acos(a)
        tensor([ 1.2294,  2.2004,  1.3690,  1.7298])
    """
    ...
def acos_(input: Tensor) -> Tensor: ...
def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    acosh(input, *, out=None) -> Tensor

    Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \cosh^{-1}(\text{input}_{i})

    Note:
        The domain of the inverse hyperbolic cosine is `[1, inf)`; values outside this range
        are mapped to ``NaN``, except for ``+INF``, for which the output is ``+INF``.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4).uniform_(1, 2)
        >>> a
        tensor([1.3192, 1.9915, 1.9674, 1.7151])
        >>> torch.acosh(a)
        tensor([0.7791, 1.3120, 1.2979, 1.1341])
    """
    ...
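# A minimal sketch of the domain note above: inputs below 1 produce NaN,
# while values >= 1 map into [0, inf). Values are illustrative.
import torch

x = torch.tensor([0.5, 1.0, 2.0])
print(torch.acosh(x))  # tensor([   nan, 0.0000, 1.3170])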
def acosh_(input: Tensor) -> Tensor: ...
def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
@overload
def add(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    add(input, other, *, alpha=1, out=None) -> Tensor

    Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.

    .. math::
        \text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

    Args:
        input (Tensor): the input tensor.
        other (Tensor or Number): the tensor or number to add to :attr:`input`.

    Keyword args:
        alpha (Number): the multiplier for :attr:`other`.
        out (Tensor, optional): the output tensor.

    Examples::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.0202,  1.0985,  1.3506, -0.6056])
        >>> torch.add(a, 20)
        tensor([20.0202, 21.0985, 21.3506, 19.3944])
        >>> b = torch.randn(4)
        >>> b
        tensor([-0.9732, -0.3497,  0.6245,  0.4022])
        >>> c = torch.randn(4, 1)
        >>> c
        tensor([[ 0.3743],
                [-1.7724],
                [-0.5811],
                [-0.8017]])
        >>> torch.add(b, c, alpha=10)
        tensor([[  2.7695,   3.3930,   4.3672,   4.1450],
                [-18.6971, -18.0736, -17.0994, -17.3216],
                [ -6.7845,  -6.1610,  -5.1868,  -5.4090],
                [ -8.9902,  -8.3667,  -7.3925,  -7.6147]])
    """
    ...
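# A minimal sketch of the alpha semantics above: torch.add(input, other, alpha=a)
# matches input + a * other, with broadcasting. Shapes are illustrative.
import torch

b = torch.randn(4)
c = torch.randn(4, 1)
assert torch.allclose(torch.add(b, c, alpha=10), b + 10 * c)  # broadcasts to (4, 4)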
@overload
def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor:
    r"""
    addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices stored
    in :attr:`batch1` and :attr:`batch2`,
    with a reduced add step (all matrix multiplications get accumulated
    along the first dimension).
    :attr:`input` is added to the final result.

    :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
    same number of matrices.

    If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
    :math:`(b \times m \times p)` tensor, :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
    and :attr:`out` will be a :math:`(n \times p)` tensor.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ \left(\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i\right)

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
    must be real numbers, otherwise they should be integers.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): matrix to be added
        batch1 (Tensor): the first batch of matrices to be multiplied
        batch2 (Tensor): the second batch of matrices to be multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(3, 5)
        >>> batch1 = torch.randn(10, 3, 4)
        >>> batch2 = torch.randn(10, 4, 5)
        >>> torch.addbmm(M, batch1, batch2)
        tensor([[  6.6311,   0.0503,   6.9768, -12.0362,  -2.1653],
                [ -4.8185,  -1.4255,  -6.6760,   8.9453,   2.5743],
                [ -3.8202,   4.3691,   1.0943,  -1.1109,   5.4730]])
    """
    ...
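# A minimal sketch of the reduced-add step described above: addbmm equals
# beta * input plus alpha times the batch-summed per-batch matmuls.
import torch

M = torch.randn(3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
ref = 0.5 * M + 2.0 * torch.bmm(batch1, batch2).sum(dim=0)
assert torch.allclose(torch.addbmm(M, batch1, batch2, beta=0.5, alpha=2.0), ref, atol=1e-5)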
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor:
    r"""
    addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

    Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
    multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.

    .. warning::
        Integer division with addcdiv is no longer supported, and in a future
        release addcdiv will perform a true division of tensor1 and tensor2.
        The historic addcdiv behavior can be implemented as
        (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
        for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
        The future addcdiv behavior is just the latter implementation:
        (input + value * tensor1 / tensor2), for all dtypes.

    .. math::
        \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}

    The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
    a real number, otherwise an integer.

    Args:
        input (Tensor): the tensor to be added
        tensor1 (Tensor): the numerator tensor
        tensor2 (Tensor): the denominator tensor

    Keyword args:
        value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
        out (Tensor, optional): the output tensor.

    Example::

        >>> t = torch.randn(1, 3)
        >>> t1 = torch.randn(3, 1)
        >>> t2 = torch.randn(1, 3)
        >>> torch.addcdiv(t, t1, t2, value=0.1)
        tensor([[-0.2312, -3.6496,  0.1312],
                [-1.0428,  3.4292, -0.1030],
                [-0.5369, -0.9829,  0.0430]])
    """
    ...
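# A minimal sketch of the formula above: addcdiv equals
# input + value * tensor1 / tensor2, with broadcasting.
import torch

t = torch.randn(1, 3)
t1 = torch.randn(3, 1)
t2 = torch.randn(1, 3)
assert torch.allclose(torch.addcdiv(t, t1, t2, value=0.1), t + 0.1 * t1 / t2)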
@overload
def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor:
    r"""
    addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

    Performs the element-wise multiplication of :attr:`tensor1`
    by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
    and adds it to :attr:`input`.

    .. math::
        \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i

    The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
    a real number, otherwise an integer.

    Args:
        input (Tensor): the tensor to be added
        tensor1 (Tensor): the tensor to be multiplied
        tensor2 (Tensor): the tensor to be multiplied

    Keyword args:
        value (Number, optional): multiplier for :math:`\text{tensor1} \times \text{tensor2}`
        out (Tensor, optional): the output tensor.

    Example::

        >>> t = torch.randn(1, 3)
        >>> t1 = torch.randn(3, 1)
        >>> t2 = torch.randn(1, 3)
        >>> torch.addcmul(t, t1, t2, value=0.1)
        tensor([[-0.8635, -0.6391,  1.6174],
                [-0.7617, -0.5879,  1.7388],
                [-0.8353, -0.6249,  1.6511]])
    """
    ...
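# Same pattern as the addcdiv sketch above, with multiplication in place of
# division: addcmul equals input + value * tensor1 * tensor2, with broadcasting.
import torch

t, t1, t2 = torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3)
assert torch.allclose(torch.addcmul(t, t1, t2, value=0.1), t + 0.1 * t1 * t2)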
@overload
def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor:
    r"""
    addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
    The matrix :attr:`input` is added to the final result.

    If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
    :math:`(m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
    and :attr:`out` will be a :math:`(n \times p)` tensor.

    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
    :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
    :attr:`input` is sparse the result will have the same layout and if :attr:`out`
    is provided it must have the same layout as :attr:`input`.

    .. warning::
        Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
        or may not have autograd support. If you notice missing functionality please
        open a feature request.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): matrix to be added
        mat1 (Tensor): the first matrix to be matrix multiplied
        mat2 (Tensor): the second matrix to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2, 3)
        >>> mat1 = torch.randn(2, 3)
        >>> mat2 = torch.randn(3, 3)
        >>> torch.addmm(M, mat1, mat2)
        tensor([[-4.8716,  1.4671, -1.3746],
                [ 0.7573, -3.9555, -2.8681]])
    """
    ...
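# A minimal sketch of the formula above: addmm equals
# beta * input + alpha * (mat1 @ mat2).
import torch

M = torch.randn(2, 3)
mat1 = torch.randn(2, 4)
mat2 = torch.randn(4, 3)
out = torch.addmm(M, mat1, mat2, beta=0.5, alpha=2.0)
assert torch.allclose(out, 0.5 * M + 2.0 * (mat1 @ mat2), atol=1e-6)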
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor:
    r"""
    addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a matrix-vector product of the matrix :attr:`mat` and
    the vector :attr:`vec`.
    The vector :attr:`input` is added to the final result.

    If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
    size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
    :attr:`out` will be a 1-D tensor of size `n`.

    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
    :attr:`mat` and :attr:`vec` and the added tensor :attr:`input`, respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers; otherwise they should be integers.

    Args:
        input (Tensor): vector to be added
        mat (Tensor): matrix to be matrix multiplied
        vec (Tensor): vector to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2)
        >>> mat = torch.randn(2, 3)
        >>> vec = torch.randn(3)
        >>> torch.addmv(M, mat, vec)
        tensor([-0.3768, -5.5565])
    """
    ...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor:
    r"""
    addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a matrix-vector product of the matrix :attr:`mat` and
    the vector :attr:`vec`.
    The vector :attr:`input` is added to the final result.

    If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
    size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
    :attr:`out` will be a 1-D tensor of size `n`.

    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
    :attr:`mat` and :attr:`vec` and the added tensor :attr:`input`, respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers; otherwise they should be integers.

    Args:
        input (Tensor): vector to be added
        mat (Tensor): matrix to be matrix multiplied
        vec (Tensor): vector to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2)
        >>> mat = torch.randn(2, 3)
        >>> vec = torch.randn(3)
        >>> torch.addmv(M, mat, vec)
        tensor([-0.3768, -5.5565])
    """
    ...
@overload
def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a matrix-vector product of the matrix :attr:`mat` and
    the vector :attr:`vec`.
    The vector :attr:`input` is added to the final result.

    If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
    size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
    :attr:`out` will be a 1-D tensor of size `n`.

    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
    :attr:`mat` and :attr:`vec` and the added tensor :attr:`input`, respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers; otherwise they should be integers.

    Args:
        input (Tensor): vector to be added
        mat (Tensor): matrix to be matrix multiplied
        vec (Tensor): vector to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2)
        >>> mat = torch.randn(2, 3)
        >>> vec = torch.randn(3)
        >>> torch.addmv(M, mat, vec)
        tensor([-0.3768, -5.5565])
    """
    ...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor:
    r"""
    addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a matrix-vector product of the matrix :attr:`mat` and
    the vector :attr:`vec`.
    The vector :attr:`input` is added to the final result.

    If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
    size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
    :attr:`out` will be a 1-D tensor of size `n`.

    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
    :attr:`mat` and :attr:`vec` and the added tensor :attr:`input`, respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers; otherwise they should be integers.

    Args:
        input (Tensor): vector to be added
        mat (Tensor): matrix to be matrix multiplied
        vec (Tensor): vector to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2)
        >>> mat = torch.randn(2, 3)
        >>> vec = torch.randn(3)
        >>> torch.addmv(M, mat, vec)
        tensor([-0.3768, -5.5565])
    """
    ...
@overload
def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor:
    r"""
    addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a matrix-vector product of the matrix :attr:`mat` and
    the vector :attr:`vec`.
    The vector :attr:`input` is added to the final result.

    If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
    size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
    :attr:`out` will be a 1-D tensor of size `n`.

    :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
    :attr:`mat` and :attr:`vec` and the added tensor :attr:`input`, respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers; otherwise they should be integers.

    Args:
        input (Tensor): vector to be added
        mat (Tensor): matrix to be matrix multiplied
        vec (Tensor): vector to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(2)
        >>> mat = torch.randn(2, 3)
        >>> vec = torch.randn(3)
        >>> torch.addmv(M, mat, vec)
        tensor([-0.3768, -5.5565])
    """
    ...
@overload
def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ...
@overload
def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
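# addmv_ is the in-place variant of torch.addmv documented above: it writes
# beta * input + alpha * (mat @ vec) back into `input`. A minimal sketch with
# illustrative values, assuming the default beta=1, alpha=1:
#
#     >>> M = torch.zeros(2)
#     >>> mat, vec = torch.ones(2, 3), torch.ones(3)
#     >>> M.addmv_(mat, vec)  # M now holds mat @ vec
#     tensor([3., 3.])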
@overload
def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor:
    r"""
    addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
    and adds it to the matrix :attr:`input`.

    Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
    outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
    :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
    of size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
    :math:`(n \times m)` and :attr:`out` will be a matrix of size
    :math:`(n \times m)`.

    Args:
        input (Tensor): matrix to be added
        vec1 (Tensor): the first vector of the outer product
        vec2 (Tensor): the second vector of the outer product

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> vec1 = torch.arange(1., 4.)
        >>> vec2 = torch.arange(1., 3.)
        >>> M = torch.zeros(3, 2)
        >>> torch.addr(M, vec1, vec2)
        tensor([[ 1., 2.],
                [ 2., 4.],
                [ 3., 6.]])
    """
    ...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor:
    r"""
    addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
    and adds it to the matrix :attr:`input`.

    Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
    outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
    :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
    of size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
    :math:`(n \times m)` and :attr:`out` will be a matrix of size
    :math:`(n \times m)`.

    Args:
        input (Tensor): matrix to be added
        vec1 (Tensor): the first vector of the outer product
        vec2 (Tensor): the second vector of the outer product

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> vec1 = torch.arange(1., 4.)
        >>> vec2 = torch.arange(1., 3.)
        >>> M = torch.zeros(3, 2)
        >>> torch.addr(M, vec1, vec2)
        tensor([[ 1., 2.],
                [ 2., 4.],
                [ 3., 6.]])
    """
    ...
@overload
def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
    and adds it to the matrix :attr:`input`.

    Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
    outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
    :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
    of size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
    :math:`(n \times m)` and :attr:`out` will be a matrix of size
    :math:`(n \times m)`.

    Args:
        input (Tensor): matrix to be added
        vec1 (Tensor): the first vector of the outer product
        vec2 (Tensor): the second vector of the outer product

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> vec1 = torch.arange(1., 4.)
        >>> vec2 = torch.arange(1., 3.)
        >>> M = torch.zeros(3, 2)
        >>> torch.addr(M, vec1, vec2)
        tensor([[ 1., 2.],
                [ 2., 4.],
                [ 3., 6.]])
    """
    ...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor:
    r"""
    addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
    and adds it to the matrix :attr:`input`.

    Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
    outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
    :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
    of size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
    :math:`(n \times m)` and :attr:`out` will be a matrix of size
    :math:`(n \times m)`.

    Args:
        input (Tensor): matrix to be added
        vec1 (Tensor): the first vector of the outer product
        vec2 (Tensor): the second vector of the outer product

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> vec1 = torch.arange(1., 4.)
        >>> vec2 = torch.arange(1., 3.)
        >>> M = torch.zeros(3, 2)
        >>> torch.addr(M, vec1, vec2)
        tensor([[ 1., 2.],
                [ 2., 4.],
                [ 3., 6.]])
    """
    ...
@overload
def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor:
    r"""
    addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
    and adds it to the matrix :attr:`input`.

    Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
    outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
    :attr:`input` respectively.

    .. math::
        \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
    of size `m`, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
    :math:`(n \times m)` and :attr:`out` will be a matrix of size
    :math:`(n \times m)`.

    Args:
        input (Tensor): matrix to be added
        vec1 (Tensor): the first vector of the outer product
        vec2 (Tensor): the second vector of the outer product

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> vec1 = torch.arange(1., 4.)
        >>> vec2 = torch.arange(1., 3.)
        >>> M = torch.zeros(3, 2)
        >>> torch.addr(M, vec1, vec2)
        tensor([[ 1., 2.],
                [ 2., 4.],
                [ 3., 6.]])
    """
    ...
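# A minimal sketch of the scaling factors described above (illustrative values):
# addr computes beta * input + alpha * outer(vec1, vec2).
#
#     >>> vec1 = torch.arange(1., 4.)
#     >>> vec2 = torch.arange(1., 3.)
#     >>> M = torch.ones(3, 2)
#     >>> torch.addr(M, vec1, vec2, beta=2.0, alpha=0.5)
#     tensor([[2.5000, 3.0000],
#             [3.0000, 4.0000],
#             [3.5000, 5.0000]])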
def adjoint(input: Tensor) -> Tensor:
    r"""
    adjoint(Tensor) -> Tensor

    Returns a view of the tensor conjugated and with the last two dimensions transposed.

    ``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
    to ``x.transpose(-2, -1)`` for real tensors.

    Example::

        >>> x = torch.arange(4, dtype=torch.float)
        >>> A = torch.complex(x, x).reshape(2, 2)
        >>> A
        tensor([[0.+0.j, 1.+1.j],
                [2.+2.j, 3.+3.j]])
        >>> A.adjoint()
        tensor([[0.-0.j, 2.-2.j],
                [1.-1.j, 3.-3.j]])
        >>> (A.adjoint() == A.mH).all()
        tensor(True)
    """
    ...
def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ...
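# affine_grid_generator carries no docstring here; it appears to be the internal
# op behind the public torch.nn.functional.affine_grid API. A hedged sketch via
# that public wrapper (shapes follow its documented (N, C, H, W) convention):
#
#     >>> theta = torch.eye(2, 3).unsqueeze(0)  # batch of one identity transform
#     >>> grid = torch.nn.functional.affine_grid(theta, size=(1, 1, 4, 4), align_corners=False)
#     >>> grid.shape
#     torch.Size([1, 4, 4, 2])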
def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.alias`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
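# A minimal sketch of the "freshly created" semantics above: mutating the copy
# leaves the input untouched, unlike a plain alias/view.
#
#     >>> x = torch.tensor([1., 2., 3.])
#     >>> y = torch.alias_copy(x)
#     >>> y[0] = 10.
#     >>> x
#     tensor([1., 2., 3.])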
@overload
def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    all(input) -> Tensor

    Tests if all elements in :attr:`input` evaluate to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.all(a)
        tensor(False, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.all(a)
        tensor(False)

    .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.rand(4, 2).bool()
        >>> a
        tensor([[True, True],
                [True, False],
                [True, True],
                [True, True]], dtype=torch.bool)
        >>> torch.all(a, dim=1)
        tensor([ True, False, True, True], dtype=torch.bool)
        >>> torch.all(a, dim=0)
        tensor([ True, False], dtype=torch.bool)
    """
    ...
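# A small sketch of the keepdim behaviour described above: the reduced dimension
# is retained with size 1 (illustrative values).
#
#     >>> a = torch.tensor([[True, True], [True, False]])
#     >>> torch.all(a, dim=1, keepdim=True)
#     tensor([[ True],
#             [False]])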
@overload
def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    all(input) -> Tensor

    Tests if all elements in :attr:`input` evaluate to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.all(a)
        tensor(False, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.all(a)
        tensor(False)

    .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.rand(4, 2).bool()
        >>> a
        tensor([[True, True],
                [True, False],
                [True, True],
                [True, True]], dtype=torch.bool)
        >>> torch.all(a, dim=1)
        tensor([ True, False, True, True], dtype=torch.bool)
        >>> torch.all(a, dim=0)
        tensor([ True, False], dtype=torch.bool)
    """
    ...
@overload
def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    all(input) -> Tensor

    Tests if all elements in :attr:`input` evaluate to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.all(a)
        tensor(False, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.all(a)
        tensor(False)

    .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.rand(4, 2).bool()
        >>> a
        tensor([[True, True],
                [True, False],
                [True, True],
                [True, True]], dtype=torch.bool)
        >>> torch.all(a, dim=1)
        tensor([ True, False, True, True], dtype=torch.bool)
        >>> torch.all(a, dim=0)
        tensor([ True, False], dtype=torch.bool)
    """
    ...
@overload
def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    all(input) -> Tensor

    Tests if all elements in :attr:`input` evaluate to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.all(a)
        tensor(False, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.all(a)
        tensor(False)

    .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.rand(4, 2).bool()
        >>> a
        tensor([[True, True],
                [True, False],
                [True, True],
                [True, True]], dtype=torch.bool)
        >>> torch.all(a, dim=1)
        tensor([ True, False, True, True], dtype=torch.bool)
        >>> torch.all(a, dim=0)
        tensor([ True, False], dtype=torch.bool)
    """
    ...
def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool:
    r"""
    allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

    This function checks if :attr:`input` and :attr:`other` satisfy the condition:

    .. math::
        \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert

    elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
    `numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_.

    Args:
        input (Tensor): first tensor to compare
        other (Tensor): second tensor to compare
        rtol (float, optional): relative tolerance. Default: 1e-05
        atol (float, optional): absolute tolerance. Default: 1e-08
        equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``

    Example::

        >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
        False
        >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
        True
        >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
        False
        >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
        True
    """
    ...
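# Worked arithmetic for the first example above: for the pair (10000., 10000.1)
# the allowed tolerance is atol + rtol * |other| = 1e-08 + 1e-05 * 10000.1 ≈ 0.100001,
# and |input - other| = 0.1, so that element passes; but for (1e-07, 1e-08) the
# difference 9e-08 exceeds 1e-08 + 1e-05 * 1e-08 ≈ 1.00001e-08, so the call
# returns False.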
def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
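# alpha_dropout has no docstring in this stub; it is the functional form of alpha
# dropout (the dropout variant designed to keep the self-normalizing property of
# SELU activations), exposed publicly as torch.nn.functional.alpha_dropout.
# A hedged sketch via that public API:
#
#     >>> x = torch.randn(4)
#     >>> y = torch.nn.functional.alpha_dropout(x, p=0.5, training=True)
#     >>> y.shape == x.shape
#     True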
def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    amax(input, dim, keepdim=False, *, out=None) -> Tensor

    Returns the maximum value of each slice of the :attr:`input` tensor in the given
    dimension(s) :attr:`dim`.

    .. note::
        The difference between ``max``/``min`` and ``amax``/``amin`` is:
            - ``amax``/``amin`` supports reducing on multiple dimensions,
            - ``amax``/``amin`` does not return indices,
            - ``amax``/``amin`` evenly distributes gradient between equal values,
              while ``max(dim)``/``min(dim)`` propagates gradient only to a single
              index in the source tensor.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
                [-0.7158, 1.1775, 2.0992, 0.4817],
                [-0.0053, 0.0164, -1.3738, -0.0507],
                [ 1.9700, 1.1106, -1.0318, -1.0816]])
        >>> torch.amax(a, 1)
        tensor([1.4878, 2.0992, 0.0164, 1.9700])
    """
    ...
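# The note above says amax can reduce over multiple dimensions at once, unlike
# max(dim). A small sketch (illustrative values):
#
#     >>> a = torch.tensor([[1., 5.], [3., 2.]])
#     >>> torch.amax(a, dim=(0, 1))
#     tensor(5.)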
def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    amin(input, dim, keepdim=False, *, out=None) -> Tensor

    Returns the minimum value of each slice of the :attr:`input` tensor in the given
    dimension(s) :attr:`dim`.

    .. note::
        The difference between ``max``/``min`` and ``amax``/``amin`` is:
            - ``amax``/``amin`` supports reducing on multiple dimensions,
            - ``amax``/``amin`` does not return indices,
            - ``amax``/``amin`` evenly distributes gradient between equal values,
              while ``max(dim)``/``min(dim)`` propagates gradient only to a single
              index in the source tensor.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
                [-0.5744, 1.2980, 1.8397, -0.2713],
                [ 0.9128, 0.9214, -1.7268, -0.2995],
                [ 0.9023, 0.4853, 0.9075, -1.6165]])
        >>> torch.amin(a, 1)
        tensor([-1.3312, -0.5744, -1.7268, -1.6165])
    """
    ...
def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax:
    r"""
    aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)

    Computes the minimum and maximum values of the :attr:`input` tensor.

    Args:
        input (Tensor):
            The input tensor

    Keyword Args:
        dim (Optional[int]):
            The dimension along which to compute the values. If `None`,
            computes the values over the entire :attr:`input` tensor.
            Default is `None`.
        keepdim (bool):
            If `True`, the reduced dimensions will be kept in the output
            tensor as dimensions with size 1 for broadcasting, otherwise
            they will be removed, as if calling (:func:`torch.squeeze`).
            Default is `False`.
        out (Optional[Tuple[Tensor, Tensor]]):
            Optional tensors on which to write the result. Must have the same
            shape and dtype as the expected output.
            Default is `None`.

    Returns:
        A named tuple `(min, max)` containing the minimum and maximum values.

    Raises:
        RuntimeError
            If any of the dimensions to compute the values over has size 0.

    .. note::
        NaN values are propagated to the output if at least one value is NaN.

    .. seealso::
        :func:`torch.amin` computes just the minimum value
        :func:`torch.amax` computes just the maximum value

    Example::

        >>> torch.aminmax(torch.tensor([1, -3, 5]))
        torch.return_types.aminmax(
        min=tensor(-3),
        max=tensor(5))
        >>> # aminmax propagates NaNs
        >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
        torch.return_types.aminmax(
        min=tensor(nan),
        max=tensor(nan))
        >>> t = torch.arange(10).view(2, 5)
        >>> t
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> t.aminmax(dim=0, keepdim=True)
        torch.return_types.aminmax(
        min=tensor([[0, 1, 2, 3, 4]]),
        max=tensor([[5, 6, 7, 8, 9]]))
    """
    ...
def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    angle(input, *, out=None) -> Tensor

    Computes the element-wise angle (in radians) of the given :attr:`input` tensor.

    .. math::
        \text{out}_{i} = angle(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    .. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
              zero for non-negative real numbers, and propagates NaNs. Previously
              the function would return zero for all real numbers and not propagate
              floating-point NaNs.

    Example::

        >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
        tensor([ 135.,  135.,  -45.])
    """
    ...
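# A small sketch of the real-input behaviour pinned down in the note above
# (pi for negative reals, zero for non-negative reals):
#
#     >>> torch.angle(torch.tensor([-1.0, 0.0, 2.0]))
#     tensor([3.1416, 0.0000, 0.0000])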
@overload
def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    any(input) -> Tensor

    Tests if any element in :attr:`input` evaluates to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.any(a)
        tensor(True, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.any(a)
        tensor(True)

    .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if any element in the row evaluates to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 2) < 0
        >>> a
        tensor([[ True, True],
                [False, True],
                [ True, True],
                [False, False]])
        >>> torch.any(a, 1)
        tensor([ True, True, True, False])
        >>> torch.any(a, 0)
        tensor([True, True])
    """
    ...
@overload
def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    any(input) -> Tensor

    Tests if any element in :attr:`input` evaluates to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.any(a)
        tensor(True, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.any(a)
        tensor(True)

    .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if any element in the row evaluates to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 2) < 0
        >>> a
        tensor([[ True, True],
                [False, True],
                [ True, True],
                [False, False]])
        >>> torch.any(a, 1)
        tensor([ True, True, True, False])
        >>> torch.any(a, 0)
        tensor([True, True])
    """
    ...
@overload
def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    any(input) -> Tensor

    Tests if any element in :attr:`input` evaluates to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.any(a)
        tensor(True, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.any(a)
        tensor(True)

    .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if any element in the row evaluates to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 2) < 0
        >>> a
        tensor([[ True, True],
                [False, True],
                [ True, True],
                [False, False]])
        >>> torch.any(a, 1)
        tensor([ True, True, True, False])
        >>> torch.any(a, 0)
        tensor([True, True])
    """
    ...
@overload
def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    any(input) -> Tensor

    Tests if any element in :attr:`input` evaluates to `True`.

    .. note:: This function matches the behaviour of NumPy in returning
              output of dtype `bool` for all supported dtypes except `uint8`.
              For `uint8` the dtype of output is `uint8` itself.

    Example::

        >>> a = torch.rand(1, 2).bool()
        >>> a
        tensor([[False, True]], dtype=torch.bool)
        >>> torch.any(a)
        tensor(True, dtype=torch.bool)
        >>> a = torch.arange(0, 3)
        >>> a
        tensor([0, 1, 2])
        >>> torch.any(a)
        tensor(True)

    .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
       :noindex:

    For each row of :attr:`input` in the given dimension :attr:`dim`,
    returns `True` if any element in the row evaluates to `True` and `False` otherwise.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 2) < 0
        >>> a
        tensor([[ True, True],
                [False, True],
                [ True, True],
                [False, False]])
        >>> torch.any(a, 1)
        tensor([ True, True, True, False])
        >>> torch.any(a, 0)
        tensor([True, True])
    """
    ...
  2593. @overload
  2594. def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
  2595. r"""
  2596. arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  2597. Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
  2598. with values from the interval ``[start, end)`` taken with common difference
  2599. :attr:`step` beginning from `start`.
  2600. Note that non-integer :attr:`step` is subject to floating point rounding errors when
  2601. comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
  2602. in such cases.
  2603. .. math::
  2604. \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
  2605. Args:
  2606. start (Number): the starting value for the set of points. Default: ``0``.
  2607. end (Number): the ending value for the set of points
  2608. step (Number): the gap between each pair of adjacent points. Default: ``1``.
  2609. Keyword args:
  2610. out (Tensor, optional): the output tensor.
  2611. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  2612. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input
  2613. arguments. If any of `start`, `end`, or `stop` are floating-point, the
  2614. `dtype` is inferred to be the default dtype, see
  2615. :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
  2616. be `torch.int64`.
  2617. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  2618. Default: ``torch.strided``.
  2619. device (:class:`torch.device`, optional): the desired device of returned tensor.
  2620. Default: if ``None``, uses the current device for the default tensor type
  2621. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  2622. for CPU tensor types and the current CUDA device for CUDA tensor types.
  2623. requires_grad (bool, optional): If autograd should record operations on the
  2624. returned tensor. Default: ``False``.
  2625. Example::
  2626. >>> torch.arange(5)
  2627. tensor([ 0, 1, 2, 3, 4])
  2628. >>> torch.arange(1, 4)
  2629. tensor([ 1, 2, 3])
  2630. >>> torch.arange(1, 2.5, 0.5)
  2631. tensor([ 1.0000, 1.5000, 2.0000])
  2632. """
  2633. ...
  2634. @overload
  2635. def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
  2636. r"""
  2637. arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  2638. Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
  2639. with values from the interval ``[start, end)`` taken with common difference
  2640. :attr:`step` beginning from `start`.
  2641. Note that non-integer :attr:`step` is subject to floating point rounding errors when
  2642. comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
  2643. in such cases.
  2644. .. math::
  2645. \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
  2646. Args:
  2647. start (Number): the starting value for the set of points. Default: ``0``.
  2648. end (Number): the ending value for the set of points
  2649. step (Number): the gap between each pair of adjacent points. Default: ``1``.
  2650. Keyword args:
  2651. out (Tensor, optional): the output tensor.
  2652. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  2653. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input
  2654. arguments. If any of `start`, `end`, or `stop` are floating-point, the
  2655. `dtype` is inferred to be the default dtype, see
  2656. :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
  2657. be `torch.int64`.
  2658. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  2659. Default: ``torch.strided``.
  2660. device (:class:`torch.device`, optional): the desired device of returned tensor.
  2661. Default: if ``None``, uses the current device for the default tensor type
  2662. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  2663. for CPU tensor types and the current CUDA device for CUDA tensor types.
  2664. requires_grad (bool, optional): If autograd should record operations on the
  2665. returned tensor. Default: ``False``.
  2666. Example::
  2667. >>> torch.arange(5)
  2668. tensor([ 0, 1, 2, 3, 4])
  2669. >>> torch.arange(1, 4)
  2670. tensor([ 1, 2, 3])
  2671. >>> torch.arange(1, 2.5, 0.5)
  2672. tensor([ 1.0000, 1.5000, 2.0000])
  2673. """
  2674. ...
  2675. @overload
  2676. def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
  2677. r"""
  2678. arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  2679. Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
  2680. with values from the interval ``[start, end)`` taken with common difference
  2681. :attr:`step` beginning from `start`.
  2682. Note that non-integer :attr:`step` is subject to floating point rounding errors when
  2683. comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
  2684. in such cases.
  2685. .. math::
  2686. \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
  2687. Args:
  2688. start (Number): the starting value for the set of points. Default: ``0``.
  2689. end (Number): the ending value for the set of points
  2690. step (Number): the gap between each pair of adjacent points. Default: ``1``.
  2691. Keyword args:
  2692. out (Tensor, optional): the output tensor.
  2693. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  2694. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input
  2695. arguments. If any of `start`, `end`, or `stop` are floating-point, the
  2696. `dtype` is inferred to be the default dtype, see
  2697. :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
  2698. be `torch.int64`.
  2699. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  2700. Default: ``torch.strided``.
  2701. device (:class:`torch.device`, optional): the desired device of returned tensor.
  2702. Default: if ``None``, uses the current device for the default tensor type
  2703. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  2704. for CPU tensor types and the current CUDA device for CUDA tensor types.
  2705. requires_grad (bool, optional): If autograd should record operations on the
  2706. returned tensor. Default: ``False``.
  2707. Example::
  2708. >>> torch.arange(5)
  2709. tensor([ 0, 1, 2, 3, 4])
  2710. >>> torch.arange(1, 4)
  2711. tensor([ 1, 2, 3])
  2712. >>> torch.arange(1, 2.5, 0.5)
  2713. tensor([ 1.0000, 1.5000, 2.0000])
  2714. """
  2715. ...
  2716. @overload
  2717. def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  2718. r"""
  2719. arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  2720. Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
  2721. with values from the interval ``[start, end)`` taken with common difference
  2722. :attr:`step` beginning from `start`.
  2723. Note that non-integer :attr:`step` is subject to floating point rounding errors when
  2724. comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
  2725. in such cases.
  2726. .. math::
  2727. \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
  2728. Args:
  2729. start (Number): the starting value for the set of points. Default: ``0``.
  2730. end (Number): the ending value for the set of points
  2731. step (Number): the gap between each pair of adjacent points. Default: ``1``.
  2732. Keyword args:
  2733. out (Tensor, optional): the output tensor.
  2734. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  2735. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input
  2736. arguments. If any of `start`, `end`, or `stop` are floating-point, the
  2737. `dtype` is inferred to be the default dtype, see
  2738. :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
  2739. be `torch.int64`.
  2740. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  2741. Default: ``torch.strided``.
  2742. device (:class:`torch.device`, optional): the desired device of returned tensor.
  2743. Default: if ``None``, uses the current device for the default tensor type
  2744. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  2745. for CPU tensor types and the current CUDA device for CUDA tensor types.
  2746. requires_grad (bool, optional): If autograd should record operations on the
  2747. returned tensor. Default: ``False``.
  2748. Example::
  2749. >>> torch.arange(5)
  2750. tensor([ 0, 1, 2, 3, 4])
  2751. >>> torch.arange(1, 4)
  2752. tensor([ 1, 2, 3])
  2753. >>> torch.arange(1, 2.5, 0.5)
  2754. tensor([ 1.0000, 1.5000, 2.0000])
  2755. """
  2756. ...
@overload
def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
    with values from the interval ``[start, end)`` taken with common difference
    :attr:`step` beginning from `start`.

    Note that non-integer :attr:`step` is subject to floating point rounding errors when
    comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small
    epsilon from :attr:`end` in such cases.

    .. math::
        \text{out}_{i+1} = \text{out}_{i} + \text{step}

    Args:
        start (Number): the starting value for the set of points. Default: ``0``.
        end (Number): the ending value for the set of points
        step (Number): the gap between each pair of adjacent points. Default: ``1``.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
            If `dtype` is not given, the data type is inferred from the other input
            arguments. If any of `start`, `end`, or `step` are floating-point, the
            `dtype` is inferred to be the default dtype, see
            :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
            be `torch.int64`.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.arange(5)
        tensor([ 0,  1,  2,  3,  4])
        >>> torch.arange(1, 4)
        tensor([ 1,  2,  3])
        >>> torch.arange(1, 2.5, 0.5)
        tensor([ 1.0000,  1.5000,  2.0000])
    """
    ...
@overload
def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
    with values from the interval ``[start, end)`` taken with common difference
    :attr:`step` beginning from `start`.

    Note that non-integer :attr:`step` is subject to floating point rounding errors when
    comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small
    epsilon from :attr:`end` in such cases.

    .. math::
        \text{out}_{i+1} = \text{out}_{i} + \text{step}

    Args:
        start (Number): the starting value for the set of points. Default: ``0``.
        end (Number): the ending value for the set of points
        step (Number): the gap between each pair of adjacent points. Default: ``1``.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
            If `dtype` is not given, the data type is inferred from the other input
            arguments. If any of `start`, `end`, or `step` are floating-point, the
            `dtype` is inferred to be the default dtype, see
            :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
            be `torch.int64`.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.arange(5)
        tensor([ 0,  1,  2,  3,  4])
        >>> torch.arange(1, 4)
        tensor([ 1,  2,  3])
        >>> torch.arange(1, 2.5, 0.5)
        tensor([ 1.0000,  1.5000,  2.0000])
    """
    ...
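# Illustrative sketch (not part of the stub itself) of the dtype-inference rule
# documented above, assuming the global default dtype is the stock ``torch.float32``:
#
#     >>> torch.arange(5).dtype          # integer-only arguments -> torch.int64
#     torch.int64
#     >>> torch.arange(0, 5, 0.5).dtype  # floating-point step -> default dtype
#     torch.float32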
def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arccos(input, *, out=None) -> Tensor

    Alias for :func:`torch.acos`.
    """
    ...
def arccos_(input: Tensor) -> Tensor: ...
def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arccosh(input, *, out=None) -> Tensor

    Alias for :func:`torch.acosh`.
    """
    ...
def arccosh_(input: Tensor) -> Tensor: ...
def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arcsin(input, *, out=None) -> Tensor

    Alias for :func:`torch.asin`.
    """
    ...
def arcsin_(input: Tensor) -> Tensor: ...
def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arcsinh(input, *, out=None) -> Tensor

    Alias for :func:`torch.asinh`.
    """
    ...
def arcsinh_(input: Tensor) -> Tensor: ...
def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arctan(input, *, out=None) -> Tensor

    Alias for :func:`torch.atan`.
    """
    ...
def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arctan2(input, other, *, out=None) -> Tensor

    Alias for :func:`torch.atan2`.
    """
    ...
def arctan_(input: Tensor) -> Tensor: ...
def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    arctanh(input, *, out=None) -> Tensor

    Alias for :func:`torch.atanh`.
    """
    ...
def arctanh_(input: Tensor) -> Tensor: ...
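# The ``arc*`` functions above are pure aliases for their ``a*`` counterparts;
# a quick illustrative check:
#
#     >>> x = torch.tensor([0.0, 0.5])
#     >>> torch.equal(torch.arcsin(x), torch.asin(x))
#     True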
def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    argmax(input) -> LongTensor

    Returns the indices of the maximum value of all elements in the :attr:`input` tensor.

    This is the second value returned by :meth:`torch.max`. See its
    documentation for the exact semantics of this method.

    .. note:: If there are multiple maximal values then the indices of the first maximal value are returned.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 1.3398,  0.2663, -0.2686,  0.2450],
                [-0.7401, -0.8805, -0.3402, -1.1936],
                [ 0.4907, -1.3948, -1.0691, -0.3132],
                [-1.6092,  0.5419, -0.2993,  0.3195]])
        >>> torch.argmax(a)
        tensor(0)

    .. function:: argmax(input, dim, keepdim=False) -> LongTensor
       :noindex:

    Returns the indices of the maximum values of a tensor across a dimension.

    This is the second value returned by :meth:`torch.max`. See its
    documentation for the exact semantics of this method.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce. If ``None``, the argmax of the flattened input is returned.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 1.3398,  0.2663, -0.2686,  0.2450],
                [-0.7401, -0.8805, -0.3402, -1.1936],
                [ 0.4907, -1.3948, -1.0691, -0.3132],
                [-1.6092,  0.5419, -0.2993,  0.3195]])
        >>> torch.argmax(a, dim=1)
        tensor([ 0,  2,  0,  1])
    """
    ...
def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    argmin(input, dim=None, keepdim=False) -> LongTensor

    Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.

    This is the second value returned by :meth:`torch.min`. See its
    documentation for the exact semantics of this method.

    .. note:: If there are multiple minimal values then the indices of the first minimal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce. If ``None``, the argmin of the flattened input is returned.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.1139,  0.2254, -0.1381,  0.3687],
                [ 1.0100, -1.1975, -0.0102, -0.4732],
                [-0.9240,  0.1207, -0.7506, -1.0213],
                [ 1.7809, -1.2960,  0.9384,  0.1438]])
        >>> torch.argmin(a)
        tensor(13)
        >>> torch.argmin(a, dim=1)
        tensor([ 2,  1,  3,  1])
        >>> torch.argmin(a, dim=1, keepdim=True)
        tensor([[2],
                [1],
                [3],
                [1]])
    """
    ...
@overload
def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor:
    r"""
    argsort(input, dim=-1, descending=False, stable=False) -> Tensor

    Returns the indices that sort a tensor along a given dimension in ascending
    order by value.

    This is the second value returned by :meth:`torch.sort`. See its documentation
    for the exact semantics of this method.

    If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
    the order of equivalent elements. If ``False``, the relative order of values
    which compare equal is not guaranteed. ``True`` is slower.

    Args:
        input (Tensor): the input tensor.
        dim (int, optional): the dimension to sort along
        descending (bool, optional): controls the sorting order (ascending or descending)
        stable (bool, optional): controls the relative order of equivalent elements

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0785,  1.5267, -0.8521,  0.4065],
                [ 0.1598,  0.0788, -0.0745, -1.2700],
                [ 1.2208,  1.0722, -0.7064,  1.2564],
                [ 0.0669, -0.2318, -0.8229, -0.9280]])
        >>> torch.argsort(a, dim=1)
        tensor([[2, 0, 3, 1],
                [3, 2, 1, 0],
                [2, 1, 0, 3],
                [3, 2, 1, 0]])
    """
    ...
@overload
def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor:
    r"""
    argsort(input, dim=-1, descending=False, stable=False) -> Tensor

    Returns the indices that sort a tensor along a given dimension in ascending
    order by value.

    This is the second value returned by :meth:`torch.sort`. See its documentation
    for the exact semantics of this method.

    If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
    the order of equivalent elements. If ``False``, the relative order of values
    which compare equal is not guaranteed. ``True`` is slower.

    Args:
        input (Tensor): the input tensor.
        dim (int, optional): the dimension to sort along
        descending (bool, optional): controls the sorting order (ascending or descending)
        stable (bool, optional): controls the relative order of equivalent elements

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0785,  1.5267, -0.8521,  0.4065],
                [ 0.1598,  0.0788, -0.0745, -1.2700],
                [ 1.2208,  1.0722, -0.7064,  1.2564],
                [ 0.0669, -0.2318, -0.8229, -0.9280]])
        >>> torch.argsort(a, dim=1)
        tensor([[2, 0, 3, 1],
                [3, 2, 1, 0],
                [2, 1, 0, 3],
                [3, 2, 1, 0]])
    """
    ...
@overload
def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor:
    r"""
    argsort(input, dim=-1, descending=False, stable=False) -> Tensor

    Returns the indices that sort a tensor along a given dimension in ascending
    order by value.

    This is the second value returned by :meth:`torch.sort`. See its documentation
    for the exact semantics of this method.

    If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
    the order of equivalent elements. If ``False``, the relative order of values
    which compare equal is not guaranteed. ``True`` is slower.

    Args:
        input (Tensor): the input tensor.
        dim (int, optional): the dimension to sort along
        descending (bool, optional): controls the sorting order (ascending or descending)
        stable (bool, optional): controls the relative order of equivalent elements

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0785,  1.5267, -0.8521,  0.4065],
                [ 0.1598,  0.0788, -0.0745, -1.2700],
                [ 1.2208,  1.0722, -0.7064,  1.2564],
                [ 0.0669, -0.2318, -0.8229, -0.9280]])
        >>> torch.argsort(a, dim=1)
        tensor([[2, 0, 3, 1],
                [3, 2, 1, 0],
                [2, 1, 0, 3],
                [3, 2, 1, 0]])
    """
    ...
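# Illustrative sketch of the ``stable`` flag documented above: with duplicate
# values, a stable sort preserves the original relative order of the ties.
#
#     >>> t = torch.tensor([1, 0, 1, 0])
#     >>> torch.argsort(t, stable=True)
#     tensor([1, 3, 0, 2])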
def argwhere(input: Tensor) -> Tensor:
    r"""
    argwhere(input) -> Tensor

    Returns a tensor containing the indices of all non-zero elements of
    :attr:`input`. Each row in the result contains the indices of a non-zero
    element in :attr:`input`. The result is sorted lexicographically, with
    the last index changing the fastest (C-style).

    If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
    :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
    non-zero elements in the :attr:`input` tensor.

    .. note::
        This function is similar to NumPy's `argwhere`.

        When :attr:`input` is on CUDA, this function causes host-device synchronization.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> t = torch.tensor([1, 0, 1])
        >>> torch.argwhere(t)
        tensor([[0],
                [2]])
        >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
        >>> torch.argwhere(t)
        tensor([[0, 0],
                [0, 2],
                [1, 1],
                [1, 2]])
    """
    ...
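# For reference (illustrative, not part of the stub): ``torch.argwhere`` reports
# the same indices as ``torch.nonzero``, which also returns them as a 2-D tensor.
#
#     >>> t = torch.tensor([[1, 0], [0, 1]])
#     >>> torch.equal(torch.argwhere(t), torch.nonzero(t))
#     True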
def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    as_strided(input, size, stride, storage_offset=None) -> Tensor

    Create a view of an existing `torch.Tensor` :attr:`input` with specified
    :attr:`size`, :attr:`stride` and :attr:`storage_offset`.

    .. warning::
        Prefer using other view functions, like :meth:`torch.Tensor.expand`,
        to setting a view's strides manually with `as_strided`, as this
        function's behavior depends on the implementation of a tensor's storage.
        The constructed view of the storage must only refer to elements within
        the storage or a runtime error will be thrown, and if the view is
        "overlapped" (with multiple indices referring to the same element in
        memory) its behavior is undefined.

    Args:
        input (Tensor): the input tensor.
        size (tuple of ints): the shape of the output tensor
        stride (tuple of ints): the stride of the output tensor
        storage_offset (int, optional): the offset in the underlying storage of the output tensor.
            If ``None``, the storage_offset of the output tensor will match the input tensor.

    Example::

        >>> x = torch.randn(3, 3)
        >>> x
        tensor([[ 0.9039,  0.6291,  1.0795],
                [ 0.1586,  2.1939, -0.4900],
                [-0.1909, -0.7503,  1.9355]])
        >>> t = torch.as_strided(x, (2, 2), (1, 2))
        >>> t
        tensor([[0.9039, 1.0795],
                [0.6291, 0.1586]])
        >>> torch.as_strided(x, (2, 2), (1, 2), 1)
        tensor([[0.6291, 0.1586],
                [1.0795, 2.1939]])
    """
    ...
def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.as_strided`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
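# Sketch of the view-vs-copy distinction (illustrative): ``as_strided`` aliases
# the input's storage, while ``as_strided_copy`` materializes fresh storage.
#
#     >>> x = torch.zeros(4)
#     >>> v = torch.as_strided(x, (2,), (1,))
#     >>> c = torch.as_strided_copy(x, (2,), (1,))
#     >>> x[0] = 1.0
#     >>> v[0].item(), c[0].item()  # the view sees the write, the copy does not
#     (1.0, 0.0)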
def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor

    Embeds the values of the :attr:`src` tensor into :attr:`input` along
    the elements corresponding to the result of calling
    input.as_strided(size, stride, storage_offset).

    This function returns a tensor with fresh storage; it does not
    return a view.

    Args:
        input (Tensor): the input tensor.
        src (Tensor): the tensor to embed into :attr:`input`
        size (tuple of ints): the shape of the output tensor
        stride (tuple of ints): the stride of the output tensor
        storage_offset (int, optional): the offset in the underlying storage of the output tensor

    .. note::
        :attr:`src` must be of the proper size in order to be embedded
        into :attr:`input`. Specifically, it should have the same shape as
        `torch.as_strided(input, size, stride, storage_offset)`

    Example::

        >>> a = torch.arange(4).reshape(2, 2) + 1
        >>> a
        tensor([[1, 2],
                [3, 4]])
        >>> b = torch.zeros(3, 3)
        >>> b
        tensor([[0., 0., 0.],
                [0., 0., 0.],
                [0., 0., 0.]])
        >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
        tensor([[1., 3., 2.],
                [4., 0., 0.],
                [0., 0., 0.]])
    """
    ...
def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor:
    r"""
    as_tensor(data, dtype=None, device=None) -> Tensor

    Converts :attr:`data` into a tensor, sharing data and preserving autograd
    history if possible.

    If :attr:`data` is already a tensor with the requested dtype and device
    then :attr:`data` itself is returned, but if :attr:`data` is a
    tensor with a different dtype or device then it's copied as if using
    `data.to(dtype=dtype, device=device)`.

    If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
    tensor is constructed using :func:`torch.from_numpy`.

    .. seealso::
        :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).

    Args:
        data (array_like): Initial data for the tensor. Can be a list, tuple,
            NumPy ``ndarray``, scalar, and other types.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, infers data type from :attr:`data`.
        device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
            then the device of data is used. If None and data is not a tensor then
            the result tensor is constructed on the current device.

    Example::

        >>> a = numpy.array([1, 2, 3])
        >>> t = torch.as_tensor(a)
        >>> t
        tensor([ 1,  2,  3])
        >>> t[0] = -1
        >>> a
        array([-1,  2,  3])

        >>> a = numpy.array([1, 2, 3])
        >>> t = torch.as_tensor(a, device=torch.device('cuda'))
        >>> t
        tensor([ 1,  2,  3])
        >>> t[0] = -1
        >>> a
        array([1, 2, 3])
    """
    ...
def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor:
    r"""
    asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor

    Converts :attr:`obj` to a tensor.

    :attr:`obj` can be one of:

    1. a tensor
    2. a NumPy array or a NumPy scalar
    3. a DLPack capsule
    4. an object that implements Python's buffer protocol
    5. a scalar
    6. a sequence of scalars

    When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
    by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
    same device, and share memory with it. These properties can be controlled with the
    :attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
    If the returned tensor is of a different datatype, on a different device, or a copy is
    requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
    is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
    also a tensor with an autograd history then the returned tensor will have the same history.

    When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
    buffer protocol then the buffer is interpreted as an array of bytes grouped according to
    the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
    passed then the default floating point datatype is used instead.) The returned tensor
    will have the specified datatype (or default floating point datatype if none is specified)
    and, by default, be on the CPU device and share memory with the buffer.

    When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
    the CPU that does not share its memory (i.e. ``copy=True``). By default, the datatype will
    be the PyTorch datatype corresponding to the NumPy scalar's datatype.

    When :attr:`obj` is none of the above but a scalar, or a sequence of scalars, then the
    returned tensor will, by default, infer its datatype from the scalar values, be on the
    current default device, and not share its memory.

    .. seealso::
        :func:`torch.tensor` creates a tensor that always copies the data from the input object.
        :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
        :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
        implement the buffer protocol.
        :func:`torch.from_dlpack` creates a tensor that always shares memory from
        DLPack capsules.

    Args:
        obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
            buffer protocol, scalar, or sequence of scalars.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
            Default: ``None``, which causes the datatype of the returned tensor to be
            inferred from :attr:`obj`.
        copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
            Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
            whenever possible. If ``True`` then the returned tensor does not share its memory.
            If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
            error is thrown if it cannot.
        device (:class:`torch.device`, optional): the device of the returned tensor.
            Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
            :attr:`obj` is a Python sequence, the current default device will be used.
        requires_grad (bool, optional): whether the returned tensor requires grad.
            Default: ``False``, which causes the returned tensor not to require a gradient.
            If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
            is also a tensor with an autograd history then the returned tensor will have
            the same history.

    Example::

        >>> a = torch.tensor([1, 2, 3])
        >>> # Shares memory with tensor 'a'
        >>> b = torch.asarray(a)
        >>> a.data_ptr() == b.data_ptr()
        True
        >>> # Forces memory copy
        >>> c = torch.asarray(a, copy=True)
        >>> a.data_ptr() == c.data_ptr()
        False

        >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
        >>> b = a + 2
        >>> b
        tensor([3., 4., 5.], grad_fn=<AddBackward0>)
        >>> # Shares memory with tensor 'b', with no grad
        >>> c = torch.asarray(b)
        >>> c
        tensor([3., 4., 5.])
        >>> # Shares memory with tensor 'b', retaining autograd history
        >>> d = torch.asarray(b, requires_grad=True)
        >>> d
        tensor([3., 4., 5.], grad_fn=<AddBackward0>)

        >>> array = numpy.array([1, 2, 3])
        >>> # Shares memory with array 'array'
        >>> t1 = torch.asarray(array)
        >>> array.__array_interface__['data'][0] == t1.data_ptr()
        True
        >>> # Copies memory due to dtype mismatch
        >>> t2 = torch.asarray(array, dtype=torch.float32)
        >>> array.__array_interface__['data'][0] == t2.data_ptr()
        False

        >>> scalar = numpy.float64(0.5)
        >>> torch.asarray(scalar)
        tensor(0.5000, dtype=torch.float64)
    """
    ...
def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    asin(input, *, out=None) -> Tensor

    Returns a new tensor with the arcsine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \sin^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.5962,  1.4985, -0.4396,  1.4525])
        >>> torch.asin(a)
        tensor([-0.6387,     nan, -0.4552,     nan])
    """
    ...
def asin_(input: Tensor) -> Tensor: ...
def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    asinh(input, *, out=None) -> Tensor

    Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \sinh^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword arguments:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
        >>> torch.asinh(a)
        tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
    """
    ...
def asinh_(input: Tensor) -> Tensor: ...
def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    atan(input, *, out=None) -> Tensor

    Returns a new tensor with the arctangent of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \tan^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.2341,  0.2539, -0.6256, -0.6448])
        >>> torch.atan(a)
        tensor([ 0.2299,  0.2487, -0.5591, -0.5727])
    """
    ...
def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    atan2(input, other, *, out=None) -> Tensor

    Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}`
    with consideration of the quadrant. Returns a new tensor with the signed angles
    in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})`
    and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second
    parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first
    parameter, is the y-coordinate.)

    The shapes of ``input`` and ``other`` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    Args:
        input (Tensor): the first input tensor
        other (Tensor): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.9041,  0.0196, -0.3108, -2.4423])
        >>> torch.atan2(a, torch.randn(4))
        tensor([ 0.9833,  0.0811, -1.9743, -1.4151])
    """
    ...
def atan_(input: Tensor) -> Tensor: ...
def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    atanh(input, *, out=None) -> Tensor

    Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.

    Note:
        The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
        will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
        mapped to `+/-INF` respectively.

    .. math::
        \text{out}_{i} = \tanh^{-1}(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword arguments:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4).uniform_(-1, 1)
        >>> a
        tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
        >>> torch.atanh(a)
        tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
    """
    ...
def atanh_(input: Tensor) -> Tensor: ...
def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ...
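# ``avg_pool1d`` carries no docstring here; a hedged usage sketch (mirroring
# ``torch.nn.functional.avg_pool1d``) over a (batch, channels, length) input,
# where the stride defaults to the kernel size:
#
#     >>> x = torch.tensor([[[1., 2., 3., 4.]]])
#     >>> torch.avg_pool1d(x, kernel_size=2)  # windows [1, 2] and [3, 4]
#     tensor([[[1.5000, 3.5000]]])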
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor:
    r"""
    baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices in :attr:`batch1`
    and :attr:`batch2`.
    :attr:`input` is added to the final result.

    :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
    number of matrices.

    If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
    :math:`(b \times m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a
    :math:`(b \times n \times p)` tensor and :attr:`out` will be a
    :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
    same as the scaling factors used in :meth:`torch.addbmm`.

    .. math::
        \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): the tensor to be added
        batch1 (Tensor): the first batch of matrices to be multiplied
        batch2 (Tensor): the second batch of matrices to be multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(10, 3, 5)
        >>> batch1 = torch.randn(10, 3, 4)
        >>> batch2 = torch.randn(10, 4, 5)
        >>> torch.baddbmm(M, batch1, batch2).size()
        torch.Size([10, 3, 5])
    """
    ...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
    r"""
    baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices in :attr:`batch1`
    and :attr:`batch2`.
    :attr:`input` is added to the final result.

    :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
    number of matrices.

    If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
    :math:`(b \times m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a
    :math:`(b \times n \times p)` tensor and :attr:`out` will be a
    :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
    same as the scaling factors used in :meth:`torch.addbmm`.

    .. math::
        \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): the tensor to be added
        batch1 (Tensor): the first batch of matrices to be multiplied
        batch2 (Tensor): the second batch of matrices to be multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(10, 3, 5)
        >>> batch1 = torch.randn(10, 3, 4)
        >>> batch2 = torch.randn(10, 4, 5)
        >>> torch.baddbmm(M, batch1, batch2).size()
        torch.Size([10, 3, 5])
    """
    ...
@overload
def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices in :attr:`batch1`
    and :attr:`batch2`.
    :attr:`input` is added to the final result.

    :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
    number of matrices.

    If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
    :math:`(b \times m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a
    :math:`(b \times n \times p)` tensor and :attr:`out` will be a
    :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
    same as the scaling factors used in :meth:`torch.addbmm`.

    .. math::
        \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): the tensor to be added
        batch1 (Tensor): the first batch of matrices to be multiplied
        batch2 (Tensor): the second batch of matrices to be multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(10, 3, 5)
        >>> batch1 = torch.randn(10, 3, 4)
        >>> batch2 = torch.randn(10, 4, 5)
        >>> torch.baddbmm(M, batch1, batch2).size()
        torch.Size([10, 3, 5])
    """
    ...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor:
    r"""
    baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices in :attr:`batch1`
    and :attr:`batch2`.
    :attr:`input` is added to the final result.

    :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
    number of matrices.

    If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
    :math:`(b \times m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a
    :math:`(b \times n \times p)` tensor and :attr:`out` will be a
    :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
    same as the scaling factors used in :meth:`torch.addbmm`.

    .. math::
        \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): the tensor to be added
        batch1 (Tensor): the first batch of matrices to be multiplied
        batch2 (Tensor): the second batch of matrices to be multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(10, 3, 5)
        >>> batch1 = torch.randn(10, 3, 4)
        >>> batch2 = torch.randn(10, 4, 5)
        >>> torch.baddbmm(M, batch1, batch2).size()
        torch.Size([10, 3, 5])
    """
    ...
@overload
def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
    r"""
    baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices in :attr:`batch1`
    and :attr:`batch2`.
    :attr:`input` is added to the final result.

    :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
    number of matrices.

    If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
    :math:`(b \times m \times p)` tensor, then :attr:`input` must be
    :ref:`broadcastable <broadcasting-semantics>` with a
    :math:`(b \times n \times p)` tensor and :attr:`out` will be a
    :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
    same as the scaling factors used in :meth:`torch.addbmm`.

    .. math::
        \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

    If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
    it will not be propagated.

    For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): the tensor to be added
        batch1 (Tensor): the first batch of matrices to be multiplied
        batch2 (Tensor): the second batch of matrices to be multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.

    Example::

        >>> M = torch.randn(10, 3, 5)
        >>> batch1 = torch.randn(10, 3, 4)
        >>> batch2 = torch.randn(10, 4, 5)
        >>> torch.baddbmm(M, batch1, batch2).size()
        torch.Size([10, 3, 5])
    """
    ...
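# Numerical sketch of the formula above (illustrative): ``baddbmm`` matches
# ``beta * input + alpha * (batch1 @ batch2)`` computed batch-wise.
#
#     >>> M = torch.randn(2, 3, 5)
#     >>> b1, b2 = torch.randn(2, 3, 4), torch.randn(2, 4, 5)
#     >>> out = torch.baddbmm(M, b1, b2, beta=0.5, alpha=2.0)
#     >>> torch.allclose(out, 0.5 * M + 2.0 * torch.bmm(b1, b2))
#     True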
@overload
def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Bartlett window function.

    .. math::
        w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
            \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
            2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
        \end{cases},

    where :math:`N` is the full window size.

    The input :attr:`window_length` is a positive integer controlling the
    returned window size. The :attr:`periodic` flag determines whether the returned
    window trims off the last duplicate value from the symmetric window and is
    ready to be used as a periodic window with functions like
    :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
    the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
    ``torch.bartlett_window(L, periodic=True)`` equal to
    ``torch.bartlett_window(L + 1, periodic=False)[:-1]``.

    .. note::
        If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

    Arguments:
        window_length (int): the size of returned window
        periodic (bool, optional): If True, returns a window to be used as periodic
            function. If False, return a symmetric window.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
        layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
            ``torch.strided`` (dense layout) is supported.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Returns:
        Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
    """
    ...
@overload
def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Bartlett window function.

    .. math::
        w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
            \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
            2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
        \end{cases},

    where :math:`N` is the full window size.

    The input :attr:`window_length` is a positive integer controlling the
    returned window size. The :attr:`periodic` flag determines whether the returned
    window trims off the last duplicate value from the symmetric window and is
    ready to be used as a periodic window with functions like
    :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
    the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
    ``torch.bartlett_window(L, periodic=True)`` equal to
    ``torch.bartlett_window(L + 1, periodic=False)[:-1]``.

    .. note::
        If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

    Arguments:
        window_length (int): the size of returned window
        periodic (bool, optional): If True, returns a window to be used as periodic
            function. If False, return a symmetric window.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
        layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
            ``torch.strided`` (dense layout) is supported.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Returns:
        Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
    """
    ...
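# Sketch of the periodic/symmetric identity stated in the docstring above
# (illustrative):
#
#     >>> L = 5
#     >>> torch.equal(torch.bartlett_window(L, periodic=True),
#     ...             torch.bartlett_window(L + 1, periodic=False)[:-1])
#     True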
def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ...
def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ...
def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
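# The ``batch_norm*`` entries above carry no docstrings in this stub. A hedged
# sketch of the primary entry point in inference mode (argument order as in the
# signature above; with ``weight``/``bias`` of ``None`` the affine step is skipped):
#
#     >>> x = torch.randn(8, 3)
#     >>> mean, var = torch.zeros(3), torch.ones(3)
#     >>> y = torch.batch_norm(x, None, None, mean, var, False, 0.1, 1e-5, False)
#     >>> torch.allclose(y, (x - mean) / torch.sqrt(var + 1e-5))
#     True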
@overload
def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bernoulli(input, *, generator=None, out=None) -> Tensor

    Draws binary random numbers (0 or 1) from a Bernoulli distribution.

    The :attr:`input` tensor should be a tensor containing probabilities
    to be used for drawing the binary random number.
    Hence, all values in :attr:`input` have to be in the range:
    :math:`0 \leq \text{input}_i \leq 1`.

    The :math:`\text{i}^{th}` element of the output tensor will draw a
    value :math:`1` according to the :math:`\text{i}^{th}` probability value given
    in :attr:`input`.

    .. math::
        \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})

    The returned :attr:`out` tensor only has values 0 or 1 and is of the same
    shape as :attr:`input`.

    :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
    point ``dtype``.

    Args:
        input (Tensor): the input tensor of probability values for the Bernoulli distribution

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
        >>> a
        tensor([[ 0.1737,  0.0950,  0.3609],
                [ 0.7148,  0.0289,  0.2676],
                [ 0.9456,  0.8937,  0.7202]])
        >>> torch.bernoulli(a)
        tensor([[ 1.,  0.,  0.],
                [ 0.,  0.,  0.],
                [ 1.,  1.,  1.]])

        >>> a = torch.ones(3, 3)  # probability of drawing "1" is 1
        >>> torch.bernoulli(a)
        tensor([[ 1.,  1.,  1.],
                [ 1.,  1.,  1.],
                [ 1.,  1.,  1.]])
        >>> a = torch.zeros(3, 3)  # probability of drawing "1" is 0
        >>> torch.bernoulli(a)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
    """
    ...
@overload
def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
    r"""
    bernoulli(input, *, generator=None, out=None) -> Tensor

    Draws binary random numbers (0 or 1) from a Bernoulli distribution.

    The :attr:`input` tensor should be a tensor containing probabilities
    to be used for drawing the binary random number.
    Hence, all values in :attr:`input` have to be in the range:
    :math:`0 \leq \text{input}_i \leq 1`.

    The :math:`\text{i}^{th}` element of the output tensor will draw a
    value :math:`1` according to the :math:`\text{i}^{th}` probability value given
    in :attr:`input`.

    .. math::
        \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})

    The returned :attr:`out` tensor only has values 0 or 1 and is of the same
    shape as :attr:`input`.

    :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
    point ``dtype``.

    Args:
        input (Tensor): the input tensor of probability values for the Bernoulli distribution

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
        >>> a
        tensor([[ 0.1737,  0.0950,  0.3609],
                [ 0.7148,  0.0289,  0.2676],
                [ 0.9456,  0.8937,  0.7202]])
        >>> torch.bernoulli(a)
        tensor([[ 1.,  0.,  0.],
                [ 0.,  0.,  0.],
                [ 1.,  1.,  1.]])

        >>> a = torch.ones(3, 3)  # probability of drawing "1" is 1
        >>> torch.bernoulli(a)
        tensor([[ 1.,  1.,  1.],
                [ 1.,  1.,  1.],
                [ 1.,  1.,  1.]])
        >>> a = torch.zeros(3, 3)  # probability of drawing "1" is 0
        >>> torch.bernoulli(a)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
    """
    ...
def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ...
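# Both functions above lack docstrings in this stub. A hedged sketch of
# ``binary_cross_entropy_with_logits``, assuming the integer ``reduction=1``
# default denotes the mean reduction:
#
#     >>> logits = torch.tensor([0.0, 2.0])
#     >>> target = torch.tensor([0.0, 1.0])
#     >>> loss = torch.binary_cross_entropy_with_logits(logits, target)
#     >>> p = torch.sigmoid(logits)
#     >>> expected = -(target * torch.log(p) + (1 - target) * torch.log(1 - p)).mean()
#     >>> torch.allclose(loss, expected)
#     True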
def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor:
    r"""
    bincount(input, weights=None, minlength=0) -> Tensor

    Count the frequency of each value in an array of non-negative ints.

    The number of bins (each of size 1) is one larger than the largest value in
    :attr:`input` unless :attr:`input` is empty, in which case the result is a
    tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
    :attr:`minlength` and if :attr:`input` is empty, then the result is a tensor of size
    :attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
    ``out[n] += weights[i]`` if :attr:`weights` is specified else
    ``out[n] += 1``.

    Note:
        This operation may produce nondeterministic gradients when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.

    Arguments:
        input (Tensor): 1-d int tensor
        weights (Tensor): optional, weight for each value in the input tensor.
            Should be of same size as input tensor.
        minlength (int): optional, minimum number of bins. Should be non-negative.

    Returns:
        output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
        :attr:`input` is non-empty, else ``Size(0)``

    Example::

        >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
        >>> weights = torch.linspace(0, 1, steps=5)
        >>> input, weights
        (tensor([4, 3, 6, 3, 4]),
         tensor([ 0.0000,  0.2500,  0.5000,  0.7500,  1.0000]))
        >>> torch.bincount(input)
        tensor([0, 0, 0, 2, 2, 0, 1])
        >>> input.bincount(weights)
        tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
    """
    ...
def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
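# ``binomial`` is undocumented here; a hedged sketch: it draws, per element, the
# number of successes out of ``count`` trials with success probability ``prob``
# (both float tensors, shapes broadcastable). The edge probabilities make the
# draw deterministic:
#
#     >>> counts = torch.tensor([10., 10.])
#     >>> probs = torch.tensor([0.0, 1.0])
#     >>> torch.binomial(counts, probs)
#     tensor([ 0., 10.])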
  3792. @overload
  3793. def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  3794. r"""
  3795. bitwise_and(input, other, *, out=None) -> Tensor
  3796. Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
  3797. integral or Boolean types. For bool tensors, it computes the logical AND.
  3798. Args:
  3799. input: the first input tensor
  3800. other: the second input tensor
  3801. Keyword args:
  3802. out (Tensor, optional): the output tensor.
  3803. Example::
  3804. >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
  3805. tensor([1, 0, 3], dtype=torch.int8)
  3806. >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
  3807. tensor([ False, True, False])
  3808. """
  3809. ...
  3810. @overload
  3811. def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor:
  3812. r"""
  3813. bitwise_and(input, other, *, out=None) -> Tensor
  3814. Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
  3815. integral or Boolean types. For bool tensors, it computes the logical AND.
  3816. Args:
  3817. input: the first input tensor
  3818. other: the second input tensor
  3819. Keyword args:
  3820. out (Tensor, optional): the output tensor.
  3821. Example::
  3822. >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
  3823. tensor([1, 0, 3], dtype=torch.int8)
  3824. >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
  3825. tensor([ False, True, False])
  3826. """
  3827. ...
  3828. @overload
  3829. def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
  3830. r"""
  3831. bitwise_and(input, other, *, out=None) -> Tensor
  3832. Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
  3833. integral or Boolean types. For bool tensors, it computes the logical AND.
  3834. Args:
  3835. input: the first input tensor
  3836. other: the second input tensor
  3837. Keyword args:
  3838. out (Tensor, optional): the output tensor.
  3839. Example::
  3840. >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
  3841. tensor([1, 0, 3], dtype=torch.int8)
  3842. >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
  3843. tensor([ False, True, False])
  3844. """
  3845. ...
@overload
def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_left_shift(input, other, *, out=None) -> Tensor

    Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
    The input tensor must be of integral type. This operator supports
    :ref:`broadcasting to a common shape <broadcasting-semantics>` and
    :ref:`type promotion <type-promotion-doc>`.

    The operation applied is:

    .. math::
        \text{out}_i = \text{input}_i << \text{other}_i

    Args:
        input (Tensor or Scalar): the first input tensor
        other (Tensor or Scalar): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-2, -2, 24], dtype=torch.int8)
    """
    ...
@overload
def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    bitwise_left_shift(input, other, *, out=None) -> Tensor

    Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
    The input tensor must be of integral type. This operator supports
    :ref:`broadcasting to a common shape <broadcasting-semantics>` and
    :ref:`type promotion <type-promotion-doc>`.

    The operation applied is:

    .. math::
        \text{out}_i = \text{input}_i << \text{other}_i

    Args:
        input (Tensor or Scalar): the first input tensor
        other (Tensor or Scalar): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-2, -2, 24], dtype=torch.int8)
    """
    ...
@overload
def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_left_shift(input, other, *, out=None) -> Tensor

    Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
    The input tensor must be of integral type. This operator supports
    :ref:`broadcasting to a common shape <broadcasting-semantics>` and
    :ref:`type promotion <type-promotion-doc>`.

    The operation applied is:

    .. math::
        \text{out}_i = \text{input}_i << \text{other}_i

    Args:
        input (Tensor or Scalar): the first input tensor
        other (Tensor or Scalar): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-2, -2, 24], dtype=torch.int8)
    """
    ...
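# Illustrative sketch (an editorial addition, not part of this stub): a left
# shift by n multiplies each element by 2**n, subject to the integer dtype's
# wraparound.
#
#     >>> import torch
#     >>> x = torch.tensor([1, 2, 3], dtype=torch.int32)
#     >>> torch.bitwise_left_shift(x, 2)   # same as x * 4
#     tensor([ 4,  8, 12], dtype=torch.int32)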
def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_not(input, *, out=None) -> Tensor

    Computes the bitwise NOT of the given input tensor. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical NOT.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
        tensor([ 0, 1, -4], dtype=torch.int8)
    """
    ...
@overload
def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_or(input, other, *, out=None) -> Tensor

    Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical OR.

    Args:
        input: the first input tensor
        other: the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-1, -2, 3], dtype=torch.int8)
        >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
        tensor([ True, True, False])
    """
    ...
@overload
def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    bitwise_or(input, other, *, out=None) -> Tensor

    Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical OR.

    Args:
        input: the first input tensor
        other: the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-1, -2, 3], dtype=torch.int8)
        >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
        tensor([ True, True, False])
    """
    ...
@overload
def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_or(input, other, *, out=None) -> Tensor

    Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical OR.

    Args:
        input: the first input tensor
        other: the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-1, -2, 3], dtype=torch.int8)
        >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
        tensor([ True, True, False])
    """
    ...
@overload
def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_right_shift(input, other, *, out=None) -> Tensor

    Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
    The input tensor must be of integral type. This operator supports
    :ref:`broadcasting to a common shape <broadcasting-semantics>` and
    :ref:`type promotion <type-promotion-doc>`.
    In any case, if the value of the right operand is negative or is greater
    than or equal to the number of bits in the promoted left operand, the behavior is undefined.

    The operation applied is:

    .. math::
        \text{out}_i = \text{input}_i >> \text{other}_i

    Args:
        input (Tensor or Scalar): the first input tensor
        other (Tensor or Scalar): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-1, -7, 3], dtype=torch.int8)
    """
    ...
@overload
def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    bitwise_right_shift(input, other, *, out=None) -> Tensor

    Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
    The input tensor must be of integral type. This operator supports
    :ref:`broadcasting to a common shape <broadcasting-semantics>` and
    :ref:`type promotion <type-promotion-doc>`.
    In any case, if the value of the right operand is negative or is greater
    than or equal to the number of bits in the promoted left operand, the behavior is undefined.

    The operation applied is:

    .. math::
        \text{out}_i = \text{input}_i >> \text{other}_i

    Args:
        input (Tensor or Scalar): the first input tensor
        other (Tensor or Scalar): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-1, -7, 3], dtype=torch.int8)
    """
    ...
@overload
def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_right_shift(input, other, *, out=None) -> Tensor

    Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
    The input tensor must be of integral type. This operator supports
    :ref:`broadcasting to a common shape <broadcasting-semantics>` and
    :ref:`type promotion <type-promotion-doc>`.
    In any case, if the value of the right operand is negative or is greater
    than or equal to the number of bits in the promoted left operand, the behavior is undefined.

    The operation applied is:

    .. math::
        \text{out}_i = \text{input}_i >> \text{other}_i

    Args:
        input (Tensor or Scalar): the first input tensor
        other (Tensor or Scalar): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-1, -7, 3], dtype=torch.int8)
    """
    ...
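# Illustrative sketch (an editorial addition, not part of this stub): the
# shift is arithmetic, so the sign is preserved and the result floors toward
# negative infinity.
#
#     >>> import torch
#     >>> torch.bitwise_right_shift(torch.tensor([16, -16], dtype=torch.int32), 2)
#     tensor([ 4, -4], dtype=torch.int32)
#     >>> torch.bitwise_right_shift(torch.tensor([-3], dtype=torch.int32), 1)   # floor(-1.5) == -2
#     tensor([-2], dtype=torch.int32)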
@overload
def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_xor(input, other, *, out=None) -> Tensor

    Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical XOR.

    Args:
        input: the first input tensor
        other: the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-2, -2, 0], dtype=torch.int8)
        >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
        tensor([ True, False, False])
    """
    ...
@overload
def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    bitwise_xor(input, other, *, out=None) -> Tensor

    Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical XOR.

    Args:
        input: the first input tensor
        other: the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-2, -2, 0], dtype=torch.int8)
        >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
        tensor([ True, False, False])
    """
    ...
@overload
def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bitwise_xor(input, other, *, out=None) -> Tensor

    Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
    integral or Boolean types. For bool tensors, it computes the logical XOR.

    Args:
        input: the first input tensor
        other: the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
        tensor([-2, -2, 0], dtype=torch.int8)
        >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
        tensor([ True, False, False])
    """
    ...
@overload
def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Blackman window function.

    .. math::
        w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)

    where :math:`N` is the full window size.

    The input :attr:`window_length` is a positive integer controlling the
    returned window size. The :attr:`periodic` flag determines whether the returned
    window trims off the last duplicate value from the symmetric window and is
    ready to be used as a periodic window with functions like
    :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
    the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
    ``torch.blackman_window(L, periodic=True)`` equal to
    ``torch.blackman_window(L + 1, periodic=False)[:-1]``.

    .. note::
        If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

    Arguments:
        window_length (int): the size of returned window
        periodic (bool, optional): If True, returns a window to be used as periodic
            function. If False, return a symmetric window.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
        layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
            ``torch.strided`` (dense layout) is supported.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Returns:
        Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
    """
    ...
@overload
def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Blackman window function.

    .. math::
        w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)

    where :math:`N` is the full window size.

    The input :attr:`window_length` is a positive integer controlling the
    returned window size. The :attr:`periodic` flag determines whether the returned
    window trims off the last duplicate value from the symmetric window and is
    ready to be used as a periodic window with functions like
    :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
    the above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
    ``torch.blackman_window(L, periodic=True)`` equal to
    ``torch.blackman_window(L + 1, periodic=False)[:-1]``.

    .. note::
        If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

    Arguments:
        window_length (int): the size of returned window
        periodic (bool, optional): If True, returns a window to be used as periodic
            function. If False, return a symmetric window.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
        layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
            ``torch.strided`` (dense layout) is supported.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Returns:
        Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
    """
    ...
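# Sketch of the documented periodic/symmetric identity (an editorial addition,
# not part of this stub): a periodic window of length L equals the symmetric
# window of length L + 1 with its duplicated last sample dropped.
#
#     >>> import torch
#     >>> w_per = torch.blackman_window(8, periodic=True)
#     >>> w_sym = torch.blackman_window(9, periodic=False)
#     >>> torch.allclose(w_per, w_sym[:-1])
#     True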
def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bmm(input, mat2, *, out=None) -> Tensor

    Performs a batch matrix-matrix product of matrices stored in :attr:`input`
    and :attr:`mat2`.

    :attr:`input` and :attr:`mat2` must be 3-D tensors each containing
    the same number of matrices.

    If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
    :math:`(b \times m \times p)` tensor, :attr:`out` will be a
    :math:`(b \times n \times p)` tensor.

    .. math::
        \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
        For broadcasting matrix products, see :func:`torch.matmul`.

    Args:
        input (Tensor): the first batch of matrices to be multiplied
        mat2 (Tensor): the second batch of matrices to be multiplied

    Keyword Args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> input = torch.randn(10, 3, 4)
        >>> mat2 = torch.randn(10, 4, 5)
        >>> res = torch.bmm(input, mat2)
        >>> res.size()
        torch.Size([10, 3, 5])
    """
    ...
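# Shape sketch (an editorial addition, not part of this stub): bmm requires
# matching batch sizes and inner dimensions; for 3-D inputs the `@` operator
# computes the same batched product.
#
#     >>> import torch
#     >>> a, b = torch.randn(10, 3, 4), torch.randn(10, 4, 5)
#     >>> out = torch.bmm(a, b)
#     >>> out.shape
#     torch.Size([10, 3, 5])
#     >>> torch.allclose(out, a @ b)
#     True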
def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor:
    r"""
    broadcast_to(input, shape) -> Tensor

    Broadcasts :attr:`input` to the shape :attr:`shape`.
    Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.

    Args:
        input (Tensor): the input tensor.
        shape (list, tuple, or :class:`torch.Size`): the new shape.

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> torch.broadcast_to(x, (3, 3))
        tensor([[1, 2, 3],
                [1, 2, 3],
                [1, 2, 3]])
    """
    ...
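# Sketch (an editorial addition, not part of this stub): broadcast_to returns
# a view, so no data is copied; size-1 dimensions are repeated by giving them
# stride 0 rather than by materializing copies.
#
#     >>> import torch
#     >>> x = torch.tensor([[1], [2]])        # shape (2, 1)
#     >>> y = torch.broadcast_to(x, (2, 3))   # shape (2, 3), same storage as x
#     >>> y
#     tensor([[1, 1, 1],
#             [2, 2, 2]])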
@overload
def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor

    Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
    boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
    as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
    this behavior is opposite the behavior of
    `numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.

    More formally, the returned index satisfies the following rules:

    .. list-table::
        :widths: 15 85
        :header-rows: 1

        * - :attr:`right`
          - *returned index satisfies*
        * - False
          - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
        * - True
          - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``

    Args:
        input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
        boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.

    Keyword args:
        out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
            Default value is False, i.e. default output data type is torch.int64.
        right (bool, optional): if False, return the first suitable location that is found. If True, return the
            last such index. If no suitable index is found, return 0 for non-numerical values
            (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
            In other words, if False, gets the lower bound index for each value in :attr:`input`
            from :attr:`boundaries`. If True, gets the upper bound index instead.
            Default value is False.
        out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.

    Example::

        >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
        >>> boundaries
        tensor([1, 3, 5, 7, 9])
        >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
        >>> v
        tensor([[3, 6, 9],
                [3, 6, 9]])
        >>> torch.bucketize(v, boundaries)
        tensor([[1, 3, 4],
                [1, 3, 4]])
        >>> torch.bucketize(v, boundaries, right=True)
        tensor([[2, 3, 5],
                [2, 3, 5]])
    """
    ...
@overload
def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor:
    r"""
    bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor

    Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
    boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
    as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
    this behavior is opposite the behavior of
    `numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.

    More formally, the returned index satisfies the following rules:

    .. list-table::
        :widths: 15 85
        :header-rows: 1

        * - :attr:`right`
          - *returned index satisfies*
        * - False
          - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
        * - True
          - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``

    Args:
        input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
        boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.

    Keyword args:
        out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
            Default value is False, i.e. default output data type is torch.int64.
        right (bool, optional): if False, return the first suitable location that is found. If True, return the
            last such index. If no suitable index is found, return 0 for non-numerical values
            (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
            In other words, if False, gets the lower bound index for each value in :attr:`input`
            from :attr:`boundaries`. If True, gets the upper bound index instead.
            Default value is False.
        out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.

    Example::

        >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
        >>> boundaries
        tensor([1, 3, 5, 7, 9])
        >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
        >>> v
        tensor([[3, 6, 9],
                [3, 6, 9]])
        >>> torch.bucketize(v, boundaries)
        tensor([[1, 3, 4],
                [1, 3, 4]])
        >>> torch.bucketize(v, boundaries, right=True)
        tensor([[2, 3, 5],
                [2, 3, 5]])
    """
    ...
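# Boundary-semantics sketch (an editorial addition, not part of this stub):
# with the default right=False a value equal to a boundary maps to that
# boundary's index; with right=True it maps one bucket higher.
#
#     >>> import torch
#     >>> boundaries = torch.tensor([0.0, 1.0, 2.0])
#     >>> x = torch.tensor([-0.5, 0.5, 1.0, 2.5])
#     >>> torch.bucketize(x, boundaries)
#     tensor([0, 1, 1, 3])
#     >>> torch.bucketize(x, boundaries, right=True)
#     tensor([0, 1, 2, 3])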
def can_cast(from_: _dtype, to: _dtype) -> _bool:
    r"""
    can_cast(from_, to) -> bool

    Determines if a type conversion is allowed under PyTorch casting rules
    described in the type promotion :ref:`documentation <type-promotion-doc>`.

    Args:
        from\_ (dtype): The original :class:`torch.dtype`.
        to (dtype): The target :class:`torch.dtype`.

    Example::

        >>> torch.can_cast(torch.double, torch.float)
        True
        >>> torch.can_cast(torch.float, torch.int)
        False
    """
    ...
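# Sketch (an editorial addition, not part of this stub): casts that can lose
# information (float -> int, complex -> real) are rejected, while widening
# casts are allowed.
#
#     >>> import torch
#     >>> torch.can_cast(torch.int32, torch.float32)
#     True
#     >>> torch.can_cast(torch.float32, torch.int32)
#     False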
@overload
def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cat(tensors, dim=0, *, out=None) -> Tensor

    Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
    All tensors must either have the same shape (except in the concatenating
    dimension) or be a 1-D empty tensor with size ``(0,)``.

    :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
    and :func:`torch.chunk`.

    :func:`torch.cat` can be best understood via examples.

    .. seealso::
        :func:`torch.stack` concatenates the given sequence along a new dimension.

    Args:
        tensors (sequence of Tensors): any python sequence of tensors of the same type.
            Non-empty tensors provided must have the same shape, except in the
            cat dimension.
        dim (int, optional): the dimension over which the tensors are concatenated

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.randn(2, 3)
        >>> x
        tensor([[ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497]])
        >>> torch.cat((x, x, x), 0)
        tensor([[ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497],
                [ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497],
                [ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497]])
        >>> torch.cat((x, x, x), 1)
        tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
                 -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
                 -0.5790, 0.1497]])
    """
    ...
@overload
def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cat(tensors, dim=0, *, out=None) -> Tensor

    Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
    All tensors must either have the same shape (except in the concatenating
    dimension) or be a 1-D empty tensor with size ``(0,)``.

    :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
    and :func:`torch.chunk`.

    :func:`torch.cat` can be best understood via examples.

    .. seealso::
        :func:`torch.stack` concatenates the given sequence along a new dimension.

    Args:
        tensors (sequence of Tensors): any python sequence of tensors of the same type.
            Non-empty tensors provided must have the same shape, except in the
            cat dimension.
        dim (int, optional): the dimension over which the tensors are concatenated

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.randn(2, 3)
        >>> x
        tensor([[ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497]])
        >>> torch.cat((x, x, x), 0)
        tensor([[ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497],
                [ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497],
                [ 0.6580, -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497]])
        >>> torch.cat((x, x, x), 1)
        tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
                 -1.0969, -0.4614],
                [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
                 -0.5790, 0.1497]])
    """
    ...
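# Shape sketch (an editorial addition, not part of this stub): all
# non-concatenated dimensions must match; the sizes along the cat dimension
# simply add up.
#
#     >>> import torch
#     >>> a, b = torch.zeros(2, 3), torch.ones(4, 3)
#     >>> torch.cat((a, b), dim=0).shape
#     torch.Size([6, 3])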
def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    ceil(input, *, out=None) -> Tensor

    Returns a new tensor with the ceil of the elements of :attr:`input`,
    the smallest integer greater than or equal to each element.
    For integer inputs, follows the array-api convention of returning a
    copy of the input tensor.

    .. math::
        \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.6341, -1.4208, -1.0900, 0.5826])
        >>> torch.ceil(a)
        tensor([-0., -1., -1., 1.])
    """
    ...
def ceil_(input: Tensor) -> Tensor: ...
def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cholesky(input, upper=False, *, out=None) -> Tensor

    Computes the Cholesky decomposition of a symmetric positive-definite
    matrix :math:`A` or for batches of symmetric positive-definite matrices.

    If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
    the decomposition has the form:

    .. math::
        A = U^TU

    If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
    the decomposition has the form:

    .. math::
        A = LL^T

    If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
    matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
    of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
    tensor will be composed of lower-triangular Cholesky factors of each of the individual
    matrices.

    .. warning::
        :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
        and will be removed in a future PyTorch release.

        ``L = torch.cholesky(A)`` should be replaced with

        .. code:: python

            L = torch.linalg.cholesky(A)

        ``U = torch.cholesky(A, upper=True)`` should be replaced with

        .. code:: python

            U = torch.linalg.cholesky(A).mH

        This transform will produce equivalent results for all valid (symmetric positive definite) inputs.

    Args:
        input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
            batch dimensions consisting of symmetric positive-definite matrices.
        upper (bool, optional): flag that indicates whether to return an
            upper or lower triangular matrix. Default: ``False``

    Keyword args:
        out (Tensor, optional): the output matrix

    Example::

        >>> a = torch.randn(3, 3)
        >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
        >>> l = torch.cholesky(a)
        >>> a
        tensor([[ 2.4112, -0.7486, 1.4551],
                [-0.7486, 1.3544, 0.1294],
                [ 1.4551, 0.1294, 1.6724]])
        >>> l
        tensor([[ 1.5528, 0.0000, 0.0000],
                [-0.4821, 1.0592, 0.0000],
                [ 0.9371, 0.5487, 0.7023]])
        >>> l @ l.mT
        tensor([[ 2.4112, -0.7486, 1.4551],
                [-0.7486, 1.3544, 0.1294],
                [ 1.4551, 0.1294, 1.6724]])
        >>> a = torch.randn(3, 2, 2) # Example for batched input
        >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
        >>> l = torch.cholesky(a)
        >>> z = l @ l.mT
        >>> torch.dist(z, a)
        tensor(2.3842e-07)
    """
    ...
def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cholesky_inverse(L, upper=False, *, out=None) -> Tensor

    Computes the inverse of a complex Hermitian or real symmetric
    positive-definite matrix given its Cholesky decomposition.

    Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
    and :math:`L` its Cholesky decomposition such that:

    .. math::
        A = LL^{\text{H}}

    where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
    and the transpose when :math:`L` is real-valued.

    Computes the inverse matrix :math:`A^{-1}`.

    Supports input of float, double, cfloat and cdouble dtypes.
    Also supports batches of matrices, and if :math:`A` is a batch of matrices
    then the output has the same batch dimensions.

    Args:
        L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
            consisting of lower or upper triangular Cholesky decompositions of
            symmetric or Hermitian positive-definite matrices.
        upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
            or upper triangular. Default: ``False``

    Keyword args:
        out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.

    Example::

        >>> A = torch.randn(3, 3)
        >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
        >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
        >>> torch.cholesky_inverse(L)
        tensor([[ 1.9314, 1.2251, -0.0889],
                [ 1.2251, 2.4439, 0.2122],
                [-0.0889, 0.2122, 0.1412]])
        >>> A.inverse()
        tensor([[ 1.9314, 1.2251, -0.0889],
                [ 1.2251, 2.4439, 0.2122],
                [-0.0889, 0.2122, 0.1412]])
        >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
        >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
        >>> L = torch.linalg.cholesky(A)
        >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L))
        tensor(5.6358e-7)
    """
    ...
def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cholesky_solve(B, L, upper=False, *, out=None) -> Tensor

    Computes the solution of a system of linear equations with complex Hermitian
    or real symmetric positive-definite lhs given its Cholesky decomposition.

    Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
    and :math:`L` its Cholesky decomposition such that:

    .. math::
        A = LL^{\text{H}}

    where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
    and the transpose when :math:`L` is real-valued.

    Returns the solution :math:`X` of the following linear system:

    .. math::
        AX = B

    Supports inputs of float, double, cfloat and cdouble dtypes.
    Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices
    then the output has the same batch dimensions.

    Args:
        B (Tensor): right-hand side tensor of shape `(*, n, k)`
            where :math:`*` is zero or more batch dimensions
        L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
            consisting of lower or upper triangular Cholesky decompositions of
            symmetric or Hermitian positive-definite matrices.
        upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
            or upper triangular. Default: ``False``.

    Keyword args:
        out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.

    Example::

        >>> A = torch.randn(3, 3)
        >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
        >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
        >>> B = torch.randn(3, 2)
        >>> torch.cholesky_solve(B, L)
        tensor([[ -8.1625, 19.6097],
                [ -5.8398, 14.2387],
                [ -4.3771, 10.4173]])
        >>> A.inverse() @ B
        tensor([[ -8.1626, 19.6097],
                [ -5.8398, 14.2387],
                [ -4.3771, 10.4173]])
        >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
        >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
        >>> L = torch.linalg.cholesky(A)
        >>> B = torch.randn(2, 1, dtype=torch.complex64)
        >>> X = torch.cholesky_solve(B, L)
        >>> torch.dist(X, A.inverse() @ B)
        tensor(1.6881e-5)
    """
    ...
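# End-to-end sketch (an editorial addition, not part of this stub): factor
# once with torch.linalg.cholesky, then reuse the factor to solve for any
# number of right-hand sides.
#
#     >>> import torch
#     >>> A = torch.randn(3, 3)
#     >>> A = A @ A.T + torch.eye(3) * 1e-3   # symmetric positive-definite
#     >>> L = torch.linalg.cholesky(A)
#     >>> B = torch.randn(3, 2)
#     >>> X = torch.cholesky_solve(B, L)      # solves A X = B using the factor L
#     >>> torch.allclose(A @ X, B, atol=1e-4)
#     True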
def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ...
def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    chunk(input, chunks, dim=0) -> List of Tensors

    Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
    the input tensor.

    .. note::
        This function may return fewer than the specified number of chunks!

    .. seealso::
        :func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks

    If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
    all returned chunks will be the same size.
    If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
    all returned chunks will be the same size, except the last one.
    If such division is not possible, this function may return fewer
    than the specified number of chunks.

    Arguments:
        input (Tensor): the tensor to split
        chunks (int): number of chunks to return
        dim (int): dimension along which to split the tensor

    Example::

        >>> torch.arange(11).chunk(6)
        (tensor([0, 1]),
         tensor([2, 3]),
         tensor([4, 5]),
         tensor([6, 7]),
         tensor([8, 9]),
         tensor([10]))
        >>> torch.arange(12).chunk(6)
        (tensor([0, 1]),
         tensor([2, 3]),
         tensor([4, 5]),
         tensor([6, 7]),
         tensor([8, 9]),
         tensor([10, 11]))
        >>> torch.arange(13).chunk(6)
        (tensor([0, 1, 2]),
         tensor([3, 4, 5]),
         tensor([6, 7, 8]),
         tensor([ 9, 10, 11]),
         tensor([12]))
    """
    ...
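# Sketch of the chunk/tensor_split difference (an editorial addition, not part
# of this stub): chunk may return fewer pieces than requested, tensor_split
# never does.
#
#     >>> import torch
#     >>> x = torch.arange(13)
#     >>> len(torch.chunk(x, 6))          # ceil(13 / 6) == 3 per chunk -> only 5 chunks
#     5
#     >>> len(torch.tensor_split(x, 6))   # always exactly 6 pieces
#     6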
@overload
def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    clamp(input, min=None, max=None, *, out=None) -> Tensor

    Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
    Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:

    .. math::
        y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)

    If :attr:`min` is ``None``, there is no lower bound.
    Or, if :attr:`max` is ``None`` there is no upper bound.

    .. note::
        If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
        sets all elements in :attr:`input` to the value of :attr:`max`.

    Args:
        input (Tensor): the input tensor.
        min (Number or Tensor, optional): lower-bound of the range to be clamped to
        max (Number or Tensor, optional): upper-bound of the range to be clamped to

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-1.7120, 0.1734, -0.0478, -0.0922])
        >>> torch.clamp(a, min=-0.5, max=0.5)
        tensor([-0.5000, 0.1734, -0.0478, -0.0922])
        >>> min = torch.linspace(-1, 1, steps=4)
        >>> torch.clamp(a, min=min)
        tensor([-1.0000, 0.1734, 0.3333, 1.0000])
    """
    ...
@overload
def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    clamp(input, min=None, max=None, *, out=None) -> Tensor

    Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
    Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:

    .. math::
        y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)

    If :attr:`min` is ``None``, there is no lower bound.
    Or, if :attr:`max` is ``None`` there is no upper bound.

    .. note::
        If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
        sets all elements in :attr:`input` to the value of :attr:`max`.

    Args:
        input (Tensor): the input tensor.
        min (Number or Tensor, optional): lower-bound of the range to be clamped to
        max (Number or Tensor, optional): upper-bound of the range to be clamped to

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-1.7120, 0.1734, -0.0478, -0.0922])
        >>> torch.clamp(a, min=-0.5, max=0.5)
        tensor([-0.5000, 0.1734, -0.0478, -0.0922])
        >>> min = torch.linspace(-1, 1, steps=4)
        >>> torch.clamp(a, min=min)
        tensor([-1.0000, 0.1734, 0.3333, 1.0000])
    """
    ...
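# Sketch (an editorial addition, not part of this stub): scalar bounds clamp
# every element, tensor bounds are applied elementwise after broadcasting.
#
#     >>> import torch
#     >>> x = torch.tensor([-2.0, 0.0, 2.0])
#     >>> torch.clamp(x, min=-1.0, max=1.0)
#     tensor([-1.,  0.,  1.])
#     >>> torch.clamp(x, min=torch.tensor([-1.0, 0.5, 0.0]))
#     tensor([-1.0000,  0.5000,  2.0000])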
@overload
def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
@overload
def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ...
@overload
def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ...
@overload
def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ...
@overload
def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ...
@overload
def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    clip(input, min=None, max=None, *, out=None) -> Tensor

    Alias for :func:`torch.clamp`.
    """
    ...
@overload
def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    clip(input, min=None, max=None, *, out=None) -> Tensor

    Alias for :func:`torch.clamp`.
    """
    ...
@overload
def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
@overload
def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor:
    r"""
    clone(input, *, memory_format=torch.preserve_format) -> Tensor

    Returns a copy of :attr:`input`.

    .. note::
        This function is differentiable, so gradients will flow back from the
        result of this operation to :attr:`input`. To create a tensor without an
        autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned tensor. Default: ``torch.preserve_format``.
    """
    ...
def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.col_indices`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    column_stack(tensors, *, out=None) -> Tensor

    Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.

    Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
    in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.

    Args:
        tensors (sequence of Tensors): sequence of tensors to concatenate

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([1, 2, 3])
        >>> b = torch.tensor([4, 5, 6])
        >>> torch.column_stack((a, b))
        tensor([[1, 4],
                [2, 5],
                [3, 6]])
        >>> a = torch.arange(5)
        >>> b = torch.arange(10).reshape(5, 2)
        >>> torch.column_stack((a, b, b))
        tensor([[0, 0, 1, 0, 1],
                [1, 2, 3, 2, 3],
                [2, 4, 5, 4, 5],
                [3, 6, 7, 6, 7],
                [4, 8, 9, 8, 9]])
    """
    ...
def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor:
    r"""
    combinations(input, r=2, with_replacement=False) -> seq

    Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
    python's `itertools.combinations` when `with_replacement` is set to `False`, and
    `itertools.combinations_with_replacement` when `with_replacement` is set to `True`.

    Arguments:
        input (Tensor): 1D vector.
        r (int, optional): number of elements to combine
        with_replacement (bool, optional): whether to allow duplication in combination

    Returns:
        Tensor: A tensor equivalent to converting the input tensor into a list, calling
            `itertools.combinations` or `itertools.combinations_with_replacement` on that
            list, and finally converting the resulting list into a tensor.

    Example::

        >>> a = [1, 2, 3]
        >>> list(itertools.combinations(a, r=2))
        [(1, 2), (1, 3), (2, 3)]
        >>> list(itertools.combinations(a, r=3))
        [(1, 2, 3)]
        >>> list(itertools.combinations_with_replacement(a, r=2))
        [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
        >>> tensor_a = torch.tensor(a)
        >>> torch.combinations(tensor_a)
        tensor([[1, 2],
                [1, 3],
                [2, 3]])
        >>> torch.combinations(tensor_a, r=3)
        tensor([[1, 2, 3]])
        >>> torch.combinations(tensor_a, with_replacement=True)
        tensor([[1, 1],
                [1, 2],
                [1, 3],
                [2, 2],
                [2, 3],
                [3, 3]])
    """
    ...
def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    complex(real, imag, *, out=None) -> Tensor

    Constructs a complex tensor with its real part equal to :attr:`real` and its
    imaginary part equal to :attr:`imag`.

    Args:
        real (Tensor): The real part of the complex tensor. Must be half, float or double.
        imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
            as :attr:`real`.

    Keyword args:
        out (Tensor): If the inputs are ``torch.float32``, must be
            ``torch.complex64``. If the inputs are ``torch.float64``, must be
            ``torch.complex128``.

    Example::

        >>> real = torch.tensor([1, 2], dtype=torch.float32)
        >>> imag = torch.tensor([3, 4], dtype=torch.float32)
        >>> z = torch.complex(real, imag)
        >>> z
        tensor([(1.+3.j), (2.+4.j)])
        >>> z.dtype
        torch.complex64
    """
    ...
@overload
def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    concat(tensors, dim=0, *, out=None) -> Tensor

    Alias of :func:`torch.cat`.
    """
    ...
@overload
def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    concat(tensors, dim=0, *, out=None) -> Tensor

    Alias of :func:`torch.cat`.
    """
    ...
@overload
def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    concatenate(tensors, axis=0, out=None) -> Tensor

    Alias of :func:`torch.cat`.
    """
    ...
@overload
def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    concatenate(tensors, axis=0, out=None) -> Tensor

    Alias of :func:`torch.cat`.
    """
    ...
def conj(input: Tensor) -> Tensor:
    r"""
    conj(input) -> Tensor

    Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
    this function just returns :attr:`input`.

    .. note::
        :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
        at any time using :func:`torch.resolve_conj`.

    .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
        non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
        when :attr:`input` is of non-complex dtype to be compatible with this change.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
        >>> x.is_conj()
        False
        >>> y = torch.conj(x)
        >>> y.is_conj()
        True
    """
    ...
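# Sketch of lazy vs. physical conjugation (an editorial addition, not part of
# this stub): torch.conj flips a flag on a view, while torch.resolve_conj
# materializes the conjugated values.
#
#     >>> import torch
#     >>> z = torch.tensor([1 + 3j, 2 - 4j])
#     >>> zc = torch.conj(z)
#     >>> zc.is_conj()
#     True
#     >>> torch.resolve_conj(zc)   # concrete conjugated values
#     tensor([1.-3.j, 2.+4.j])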
  4882. def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  4883. r"""
  4884. conj_physical(input, *, out=None) -> Tensor
  4885. Computes the element-wise conjugate of the given :attr:`input` tensor.
  4886. If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
  4887. .. note::
  4888. This performs the conjugate operation regardless of the fact conjugate bit is set or not.
  4889. .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
  4890. non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
  4891. when :attr:`input` is of non-complex dtype to be compatible with this change.
  4892. .. math::
  4893. \text{out}_{i} = conj(\text{input}_{i})
  4894. Args:
  4895. input (Tensor): the input tensor.
  4896. Keyword args:
  4897. out (Tensor, optional): the output tensor.
  4898. Example::
  4899. >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
  4900. tensor([-1 - 1j, -2 - 2j, 3 + 3j])
  4901. """
  4902. ...
  4903. def conj_physical_(input: Tensor) -> Tensor: ...
  4904. def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ...
  4905. @overload
  4906. def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
  4907. @overload
  4908. def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
  4909. @overload
  4910. def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
  4911. @overload
  4912. def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
  4913. @overload
  4914. def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
  4915. @overload
  4916. def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
  4917. def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ...
  4918. def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
  4919. def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
  4920. def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
  4921. def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
@overload
def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    copysign(input, other, *, out=None) -> Tensor

    Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.

    .. math::
        \text{out}_{i} = \begin{cases}
            -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
             |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
        \end{cases}

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    and integer and float inputs.

    Args:
        input (Tensor): magnitudes.
        other (Tensor or Number): contains value(s) whose signbit(s) are
            applied to the magnitudes in :attr:`input`.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
        >>> torch.copysign(a, 1)
        tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
                [-0.0059, -0.2600, -0.4475, -1.3948],
                [ 0.3667, -0.9567, -2.5757, -0.1751],
                [ 0.2046, -0.0742, 0.2998, -0.1054]])
        >>> b = torch.randn(4)
        >>> b
        tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
        >>> torch.copysign(a, b)
        tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
                [ 0.0059, 0.2600, 0.4475, -1.3948],
                [ 0.3667, 0.9567, 2.5757, -0.1751],
                [ 0.2046, 0.0742, 0.2998, -0.1054]])
        >>> a = torch.tensor([1.])
        >>> b = torch.tensor([-0.])
        >>> torch.copysign(a, b)
        tensor([-1.])

    .. note::
        copysign handles signed zeros. If the other argument has a negative zero (-0),
        the corresponding output value will be negative.
    """
    ...
@overload
def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    copysign(input, other, *, out=None) -> Tensor

    Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.

    .. math::
        \text{out}_{i} = \begin{cases}
            -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
             |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
        \end{cases}

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    and integer and float inputs.

    Args:
        input (Tensor): magnitudes.
        other (Tensor or Number): contains value(s) whose signbit(s) are
            applied to the magnitudes in :attr:`input`.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
        >>> torch.copysign(a, 1)
        tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
                [-0.0059, -0.2600, -0.4475, -1.3948],
                [ 0.3667, -0.9567, -2.5757, -0.1751],
                [ 0.2046, -0.0742, 0.2998, -0.1054]])
        >>> b = torch.randn(4)
        >>> b
        tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
        >>> torch.copysign(a, b)
        tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
                [ 0.0059, 0.2600, 0.4475, -1.3948],
                [ 0.3667, 0.9567, 2.5757, -0.1751],
                [ 0.2046, 0.0742, 0.2998, -0.1054]])
        >>> a = torch.tensor([1.])
        >>> b = torch.tensor([-0.])
        >>> torch.copysign(a, b)
        tensor([-1.])

    .. note::
        copysign handles signed zeros. If the other argument has a negative zero (-0),
        the corresponding output value will be negative.
    """
    ...
def corrcoef(input: Tensor) -> Tensor:
    r"""
    corrcoef(input) -> Tensor

    Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
    where rows are the variables and columns are the observations.

    .. note::
        The correlation coefficient matrix R is computed using the covariance matrix C as given by
        :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`

    .. note::
        Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
        The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.

    Args:
        input (Tensor): A 2D matrix containing multiple variables and observations, or a
            Scalar or 1D vector representing a single variable.

    Returns:
        (Tensor) The correlation coefficient matrix of the variables.

    .. seealso::
        :func:`torch.cov` covariance matrix.

    Example::

        >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
        >>> torch.corrcoef(x)
        tensor([[ 1., -1.],
                [-1., 1.]])
        >>> x = torch.randn(2, 4)
        >>> x
        tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
                [-0.5812, 0.1535, 0.2387, 0.2350]])
        >>> torch.corrcoef(x)
        tensor([[1.0000, 0.3582],
                [0.3582, 1.0000]])
        >>> torch.corrcoef(x[0])
        tensor(1.)
    """
    ...
def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cos(input, *, out=None) -> Tensor

    Returns a new tensor with the cosine of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \cos(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
        >>> torch.cos(a)
        tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
    """
    ...
def cos_(input: Tensor) -> Tensor: ...
def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cosh(input, *, out=None) -> Tensor

    Returns a new tensor with the hyperbolic cosine of the elements of
    :attr:`input`.

    .. math::
        \text{out}_{i} = \cosh(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
        >>> torch.cosh(a)
        tensor([ 1.0133, 1.7860, 1.2536, 1.2805])

    .. note::
        When :attr:`input` is on the CPU, the implementation of torch.cosh may use
        the Sleef library, which rounds very large results to infinity or negative
        infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
    """
    ...
def cosh_(input: Tensor) -> Tensor: ...
def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ...
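# A minimal sketch of cosine_similarity over rows (dim=1); the values are chosen
# so the result is exact:
# >>> a = torch.tensor([[1., 0.], [1., 1.]])
# >>> b = torch.tensor([[1., 0.], [-1., -1.]])
# >>> torch.cosine_similarity(a, b, dim=1)
# tensor([ 1., -1.])
# In cosine_embedding_loss above, `reduction` is an integer rather than a string;
# assuming the usual at::Reduction encoding, 0 = none, 1 = mean, 2 = sum.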
@overload
def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor:
    r"""
    count_nonzero(input, dim=None) -> Tensor

    Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
    If no dim is specified then all non-zeros in the tensor are counted.

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.

    Example::

        >>> x = torch.zeros(3,3)
        >>> x[torch.randn(3,3) > 0.5] = 1
        >>> x
        tensor([[0., 1., 1.],
                [0., 0., 0.],
                [0., 0., 1.]])
        >>> torch.count_nonzero(x)
        tensor(3)
        >>> torch.count_nonzero(x, dim=0)
        tensor([0, 1, 2])
    """
    ...
@overload
def count_nonzero(input: Tensor, dim: _size) -> Tensor:
    r"""
    count_nonzero(input, dim=None) -> Tensor

    Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
    If no dim is specified then all non-zeros in the tensor are counted.

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.

    Example::

        >>> x = torch.zeros(3,3)
        >>> x[torch.randn(3,3) > 0.5] = 1
        >>> x
        tensor([[0., 1., 1.],
                [0., 0., 0.],
                [0., 0., 1.]])
        >>> torch.count_nonzero(x)
        tensor(3)
        >>> torch.count_nonzero(x, dim=0)
        tensor([0, 1, 2])
    """
    ...
def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor:
    r"""
    cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor

    Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
    the variables and columns are the observations.

    A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
    the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
    a single variable (Scalar or 1D) then its variance is returned.

    The sample covariance of the variables :math:`x` and :math:`y` is given by:

    .. math::
        \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}

    where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and
    :math:`\delta N` is the :attr:`correction`.

    If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
    is calculated, which is given by:

    .. math::
        \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
        {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}

    where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
    provided, or :math:`w = f \times a` if both are provided, and
    :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
    provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.

    Args:
        input (Tensor): A 2D matrix containing multiple variables and observations, or a
            Scalar or 1D vector representing a single variable.

    Keyword Args:
        correction (int, optional): difference between the sample size and sample degrees of freedom.
            Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate
            even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
            will return the simple average.
        fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
            times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
            Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
        aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
            These relative weights are typically large for observations considered "important" and smaller for
            observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
            Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.

    Returns:
        (Tensor) The covariance matrix of the variables.

    .. seealso::
        :func:`torch.corrcoef` normalized covariance matrix.

    Example::

        >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
        >>> x
        tensor([[0, 1, 2],
                [2, 1, 0]])
        >>> torch.cov(x)
        tensor([[ 1., -1.],
                [-1., 1.]])
        >>> torch.cov(x, correction=0)
        tensor([[ 0.6667, -0.6667],
                [-0.6667, 0.6667]])
        >>> fw = torch.randint(1, 10, (3,))
        >>> fw
        tensor([1, 6, 9])
        >>> aw = torch.rand(3)
        >>> aw
        tensor([0.4282, 0.0255, 0.4144])
        >>> torch.cov(x, fweights=fw, aweights=aw)
        tensor([[ 0.4169, -0.4169],
                [-0.4169, 0.4169]])
    """
    ...
def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cross(input, other, dim=None, *, out=None) -> Tensor

    Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
    and :attr:`other`.

    Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
    of vectors, for which it computes the product along the dimension :attr:`dim`.
    In this case, the output has the same batch dimensions as the inputs.

    .. warning::
        If :attr:`dim` is not given, it defaults to the first dimension found
        with the size 3. Note that this might be unexpected.
        This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross`
        in a future release.

    .. seealso::
        :func:`torch.linalg.cross` which has dim=-1 as default.

    Args:
        input (Tensor): the input tensor.
        other (Tensor): the second input tensor
        dim (int, optional): the dimension to take the cross-product in.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4, 3)
        >>> a
        tensor([[-0.3956, 1.1455, 1.6895],
                [-0.5849, 1.3672, 0.3599],
                [-1.1626, 0.7180, -0.0521],
                [-0.1339, 0.9902, -2.0225]])
        >>> b = torch.randn(4, 3)
        >>> b
        tensor([[-0.0257, -1.4725, -1.2251],
                [-1.1479, -0.7005, -1.9757],
                [-1.3904, 0.3726, -1.1836],
                [-0.9688, -0.7153, 0.2159]])
        >>> torch.cross(a, b, dim=1)
        tensor([[ 1.0844, -0.5281, 0.6120],
                [-2.4490, -1.5687, 1.9792],
                [-0.8304, -1.3037, 0.5650],
                [-1.2329, 1.9883, 1.0551]])
        >>> torch.cross(a, b)
        tensor([[ 1.0844, -0.5281, 0.6120],
                [-2.4490, -1.5687, 1.9792],
                [-0.8304, -1.3037, 0.5650],
                [-1.2329, 1.9883, 1.0551]])
    """
    ...
def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.crow_indices`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
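# A minimal sketch of the copied CSR row pointers; the 2x2 matrix below has one
# non-zero per row, so the compressed row vector is [0, 1, 2]:
# >>> csr = torch.tensor([[1., 0.], [0., 2.]]).to_sparse_csr()
# >>> torch.crow_indices_copy(csr)
# tensor([0, 1, 2])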
@overload
def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
@overload
def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
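# A minimal sketch of the tensor-lengths overload above; log_probs is (T, N, C)
# and must already hold log-probabilities, and `reduction` is assumed to follow
# the internal ATen enum (0 = none, 1 = mean, 2 = sum) rather than a string:
# >>> T, N, C, S = 50, 2, 20, 10
# >>> log_probs = torch.randn(T, N, C).log_softmax(2)
# >>> targets = torch.randint(1, C, (N, S), dtype=torch.long)
# >>> input_lengths = torch.full((N,), T, dtype=torch.long)
# >>> target_lengths = torch.full((N,), S, dtype=torch.long)
# >>> loss = torch.ctc_loss(log_probs, targets, input_lengths, target_lengths)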
def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
def cudnn_is_acceptable(input: Tensor) -> _bool: ...
@overload
def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
    r"""
    cummax(input, dim, *, out=None) -> (Tensor, LongTensor)

    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
    elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
    location of each maximum value found in the dimension :attr:`dim`.

    .. math::
        y_i = max(x_1, x_2, x_3, \dots, x_i)

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (values, indices)

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
                1.9946, -0.8209])
        >>> torch.cummax(a, dim=0)
        torch.return_types.cummax(
        values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
                1.9946, 1.9946]),
        indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
    """
    ...
@overload
def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
    r"""
    cummax(input, dim, *, out=None) -> (Tensor, LongTensor)

    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
    elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
    location of each maximum value found in the dimension :attr:`dim`.

    .. math::
        y_i = max(x_1, x_2, x_3, \dots, x_i)

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (values, indices)

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
                1.9946, -0.8209])
        >>> torch.cummax(a, dim=0)
        torch.return_types.cummax(
        values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
                1.9946, 1.9946]),
        indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
    """
    ...
@overload
def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
    r"""
    cummin(input, dim, *, out=None) -> (Tensor, LongTensor)

    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
    elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
    location of each minimum value found in the dimension :attr:`dim`.

    .. math::
        y_i = min(x_1, x_2, x_3, \dots, x_i)

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (values, indices)

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
                0.9165, 1.6684])
        >>> torch.cummin(a, dim=0)
        torch.return_types.cummin(
        values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
                -1.3298, -1.3298]),
        indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
    """
    ...
@overload
def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
    r"""
    cummin(input, dim, *, out=None) -> (Tensor, LongTensor)

    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
    elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
    location of each minimum value found in the dimension :attr:`dim`.

    .. math::
        y_i = min(x_1, x_2, x_3, \dots, x_i)

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (values, indices)

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
                0.9165, 1.6684])
        >>> torch.cummin(a, dim=0)
        torch.return_types.cummin(
        values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
                -1.3298, -1.3298]),
        indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
    """
    ...
@overload
def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cumprod(input, dim, *, dtype=None, out=None) -> Tensor

    Returns the cumulative product of elements of :attr:`input` in the dimension
    :attr:`dim`.

    For example, if :attr:`input` is a vector of size N, the result will also be
    a vector of size N, with elements.

    .. math::
        y_i = x_1 \times x_2 \times x_3 \times \dots \times x_i

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
                -0.2129, -0.4206, 0.1968])
        >>> torch.cumprod(a, dim=0)
        tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
                0.0014, -0.0006, -0.0001])
        >>> a[5] = 0.0
        >>> torch.cumprod(a, dim=0)
        tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
                0.0000, -0.0000, -0.0000])
    """
    ...
@overload
def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cumprod(input, dim, *, dtype=None, out=None) -> Tensor

    Returns the cumulative product of elements of :attr:`input` in the dimension
    :attr:`dim`.

    For example, if :attr:`input` is a vector of size N, the result will also be
    a vector of size N, with elements.

    .. math::
        y_i = x_1 \times x_2 \times x_3 \times \dots \times x_i

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(10)
        >>> a
        tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
                -0.2129, -0.4206, 0.1968])
        >>> torch.cumprod(a, dim=0)
        tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
                0.0014, -0.0006, -0.0001])
        >>> a[5] = 0.0
        >>> torch.cumprod(a, dim=0)
        tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
                0.0000, -0.0000, -0.0000])
    """
    ...
@overload
def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cumsum(input, dim, *, dtype=None, out=None) -> Tensor

    Returns the cumulative sum of elements of :attr:`input` in the dimension
    :attr:`dim`.

    For example, if :attr:`input` is a vector of size N, the result will also be
    a vector of size N, with elements.

    .. math::
        y_i = x_1 + x_2 + x_3 + \dots + x_i

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randint(1, 20, (10,))
        >>> a
        tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
        >>> torch.cumsum(a, dim=0)
        tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
    """
    ...
@overload
def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    cumsum(input, dim, *, dtype=None, out=None) -> Tensor

    Returns the cumulative sum of elements of :attr:`input` in the dimension
    :attr:`dim`.

    For example, if :attr:`input` is a vector of size N, the result will also be
    a vector of size N, with elements.

    .. math::
        y_i = x_1 + x_2 + x_3 + \dots + x_i

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randint(1, 20, (10,))
        >>> a
        tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
        >>> torch.cumsum(a, dim=0)
        tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
    """
    ...
@overload
def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
    r"""
    cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

    Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
    along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
    :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
    used to specify arbitrary spacing along :attr:`dim`.

    For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
    and this function is that :func:`torch.trapezoid` returns a value for each integration,
    whereas this function returns a cumulative value for every spacing within the integration. This
    is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.

    Arguments:
        y (Tensor): Values to use when computing the trapezoidal rule.
        x (Tensor): If specified, defines spacing between values as specified above.

    Keyword arguments:
        dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
            is specified then this defaults to 1. Effectively multiplies the result by its value.
        dim (int): The dimension along which to compute the trapezoidal rule.
            The last (inner-most) dimension by default.

    Examples::

        >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
        >>> y = torch.tensor([1, 5, 10])
        >>> torch.cumulative_trapezoid(y)
        tensor([3., 10.5])
        >>> # Computes the same trapezoidal rule directly up to each element to verify
        >>> (1 + 5) / 2
        3.0
        >>> (1 + 10 + 10) / 2
        10.5
        >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
        >>> # NOTE: the result is the same as before, but multiplied by 2
        >>> torch.cumulative_trapezoid(y, dx=2)
        tensor([6., 21.])
        >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.cumulative_trapezoid(y, x)
        tensor([6., 28.5])
        >>> # Computes the same trapezoidal rule directly up to each element to verify
        >>> ((3 - 1) * (1 + 5)) / 2
        6.0
        >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
        28.5
        >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
        >>> y = torch.arange(9).reshape(3, 3)
        >>> y
        tensor([[0, 1, 2],
                [3, 4, 5],
                [6, 7, 8]])
        >>> torch.cumulative_trapezoid(y)
        tensor([[ 0.5, 2.],
                [ 3.5, 8.],
                [ 6.5, 14.]])
        >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
        >>> torch.cumulative_trapezoid(y, dim=0)
        tensor([[ 1.5, 2.5, 3.5],
                [ 6.0, 8.0, 10.0]])
        >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with the same arbitrary spacing
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.cumulative_trapezoid(y, x)
        tensor([[2., 5.],
                [2., 5.],
                [2., 5.]])
        >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with different arbitrary spacing per row
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
        >>> torch.cumulative_trapezoid(y, x)
        tensor([[1., 2.],
                [2., 4.],
                [3., 6.]])
    """
    ...
@overload
def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor:
    r"""
    cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

    Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
    along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
    :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
    used to specify arbitrary spacing along :attr:`dim`.

    For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
    and this function is that :func:`torch.trapezoid` returns a value for each integration,
    whereas this function returns a cumulative value for every spacing within the integration. This
    is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.

    Arguments:
        y (Tensor): Values to use when computing the trapezoidal rule.
        x (Tensor): If specified, defines spacing between values as specified above.

    Keyword arguments:
        dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
            is specified then this defaults to 1. Effectively multiplies the result by its value.
        dim (int): The dimension along which to compute the trapezoidal rule.
            The last (inner-most) dimension by default.

    Examples::

        >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
        >>> y = torch.tensor([1, 5, 10])
        >>> torch.cumulative_trapezoid(y)
        tensor([3., 10.5])
        >>> # Computes the same trapezoidal rule directly up to each element to verify
        >>> (1 + 5) / 2
        3.0
        >>> (1 + 10 + 10) / 2
        10.5
        >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
        >>> # NOTE: the result is the same as before, but multiplied by 2
        >>> torch.cumulative_trapezoid(y, dx=2)
        tensor([6., 21.])
        >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.cumulative_trapezoid(y, x)
        tensor([6., 28.5])
        >>> # Computes the same trapezoidal rule directly up to each element to verify
        >>> ((3 - 1) * (1 + 5)) / 2
        6.0
        >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
        28.5
        >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
        >>> y = torch.arange(9).reshape(3, 3)
        >>> y
        tensor([[0, 1, 2],
                [3, 4, 5],
                [6, 7, 8]])
        >>> torch.cumulative_trapezoid(y)
        tensor([[ 0.5, 2.],
                [ 3.5, 8.],
                [ 6.5, 14.]])
        >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
        >>> torch.cumulative_trapezoid(y, dim=0)
        tensor([[ 1.5, 2.5, 3.5],
                [ 6.0, 8.0, 10.0]])
        >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with the same arbitrary spacing
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.cumulative_trapezoid(y, x)
        tensor([[2., 5.],
                [2., 5.],
                [2., 5.]])
        >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with different arbitrary spacing per row
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
        >>> torch.cumulative_trapezoid(y, x)
        tensor([[1., 2.],
                [2., 4.],
                [3., 6.]])
    """
    ...
def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    deg2rad(input, *, out=None) -> Tensor

    Returns a new tensor with each of the elements of :attr:`input`
    converted from angles in degrees to radians.

    Args:
        input (Tensor): the input tensor.

    Keyword arguments:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
        >>> torch.deg2rad(a)
        tensor([[ 3.1416, -3.1416],
                [ 6.2832, -6.2832],
                [ 1.5708, -1.5708]])
    """
    ...
def deg2rad_(input: Tensor) -> Tensor: ...
@overload
def dequantize(input: Tensor) -> Tensor:
    r"""
    dequantize(tensor) -> Tensor

    Returns an fp32 Tensor by dequantizing a quantized Tensor

    Args:
        tensor (Tensor): A quantized Tensor

    .. function:: dequantize(tensors) -> sequence of Tensors
       :noindex:

    Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors

    Args:
        tensors (sequence of Tensors): A list of quantized Tensors
    """
    ...
@overload
def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
    r"""
    dequantize(tensor) -> Tensor

    Returns an fp32 Tensor by dequantizing a quantized Tensor

    Args:
        tensor (Tensor): A quantized Tensor

    .. function:: dequantize(tensors) -> sequence of Tensors
       :noindex:

    Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors

    Args:
        tensors (sequence of Tensors): A list of quantized Tensors
    """
    ...
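# A minimal quantize/dequantize round trip; the scale and zero_point below are
# chosen so the values reconstruct exactly:
# >>> x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
# >>> q = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
# >>> torch.dequantize(q)
# tensor([-1.,  0.,  1.,  2.])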
def det(input: Tensor) -> Tensor:
    r"""
    det(input) -> Tensor

    Alias for :func:`torch.linalg.det`
    """
    ...
def detach(input: Tensor) -> Tensor: ...
def detach_(input: Tensor) -> Tensor: ...
def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.detach`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
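# A minimal sketch of the detach/detach_copy distinction, assuming the functional
# forms mirror Tensor.detach(): detach shares the input's storage, detach_copy
# allocates fresh storage:
# >>> x = torch.ones(2, requires_grad=True)
# >>> y = torch.detach(x)
# >>> y.requires_grad
# False
# >>> y.data_ptr() == x.data_ptr()
# True
# >>> torch.detach_copy(x).data_ptr() == x.data_ptr()
# False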
def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    diag(input, diagonal=0, *, out=None) -> Tensor

    - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
      with the elements of :attr:`input` as the diagonal.
    - If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
      the diagonal elements of :attr:`input`.

    The argument :attr:`diagonal` controls which diagonal to consider:

    - If :attr:`diagonal` = 0, it is the main diagonal.
    - If :attr:`diagonal` > 0, it is above the main diagonal.
    - If :attr:`diagonal` < 0, it is below the main diagonal.

    Args:
        input (Tensor): the input tensor.
        diagonal (int, optional): the diagonal to consider

    Keyword args:
        out (Tensor, optional): the output tensor.

    .. seealso::
        :func:`torch.diagonal` always returns the diagonal of its input.

        :func:`torch.diagflat` always constructs a tensor with diagonal elements
        specified by the input.

    Examples:

    Get the square matrix where the input vector is the diagonal::

        >>> a = torch.randn(3)
        >>> a
        tensor([ 0.5950,-0.0872, 2.3298])
        >>> torch.diag(a)
        tensor([[ 0.5950, 0.0000, 0.0000],
                [ 0.0000,-0.0872, 0.0000],
                [ 0.0000, 0.0000, 2.3298]])
        >>> torch.diag(a, 1)
        tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
                [ 0.0000, 0.0000,-0.0872, 0.0000],
                [ 0.0000, 0.0000, 0.0000, 2.3298],
                [ 0.0000, 0.0000, 0.0000, 0.0000]])

    Get the k-th diagonal of a given matrix::

        >>> a = torch.randn(3, 3)
        >>> a
        tensor([[-0.4264, 0.0255,-0.1064],
                [ 0.8795,-0.2429, 0.1374],
                [ 0.1029,-0.6482,-1.6300]])
        >>> torch.diag(a, 0)
        tensor([-0.4264,-0.2429,-1.6300])
        >>> torch.diag(a, 1)
        tensor([ 0.0255, 0.1374])
    """
    ...
def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor:
    r"""
    diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor

    Creates a tensor whose diagonals of certain 2D planes (specified by
    :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
    To facilitate creating batched diagonal matrices, the 2D planes formed by
    the last two dimensions of the returned tensor are chosen by default.

    The argument :attr:`offset` controls which diagonal to consider:

    - If :attr:`offset` = 0, it is the main diagonal.
    - If :attr:`offset` > 0, it is above the main diagonal.
    - If :attr:`offset` < 0, it is below the main diagonal.

    The size of the new matrix is calculated so that the specified diagonal
    has the length of the last dimension of :attr:`input`.
    Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
    and :attr:`dim2` matters. Exchanging them is equivalent to changing the
    sign of :attr:`offset`.

    Applying :meth:`torch.diagonal` to the output of this function with
    the same arguments yields a matrix identical to input. However,
    :meth:`torch.diagonal` has different default dimensions, so those
    need to be explicitly specified.

    Args:
        input (Tensor): the input tensor. Must be at least 1-dimensional.
        offset (int, optional): which diagonal to consider. Default: 0
            (main diagonal).
        dim1 (int, optional): first dimension with respect to which to
            take diagonal. Default: -2.
        dim2 (int, optional): second dimension with respect to which to
            take diagonal. Default: -1.

    Example::

        >>> a = torch.randn(2, 3)
        >>> torch.diag_embed(a)
        tensor([[[ 1.5410, 0.0000, 0.0000],
                 [ 0.0000, -0.2934, 0.0000],
                 [ 0.0000, 0.0000, -2.1788]],
                [[ 0.5684, 0.0000, 0.0000],
                 [ 0.0000, -1.0845, 0.0000],
                 [ 0.0000, 0.0000, -1.3986]]])
        >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
        tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
                 [ 0.0000, 0.5684, 0.0000, 0.0000]],
                [[ 0.0000, 0.0000, -0.2934, 0.0000],
                 [ 0.0000, 0.0000, -1.0845, 0.0000]],
                [[ 0.0000, 0.0000, 0.0000, -2.1788],
                 [ 0.0000, 0.0000, 0.0000, -1.3986]],
                [[ 0.0000, 0.0000, 0.0000, 0.0000],
                 [ 0.0000, 0.0000, 0.0000, 0.0000]]])
    """
    ...
def diagflat(input: Tensor, offset: _int = 0) -> Tensor:
    r"""
    diagflat(input, offset=0) -> Tensor

    - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
      with the elements of :attr:`input` as the diagonal.
    - If :attr:`input` is a tensor with more than one dimension, then returns a
      2-D tensor with diagonal elements equal to a flattened :attr:`input`.

    The argument :attr:`offset` controls which diagonal to consider:

    - If :attr:`offset` = 0, it is the main diagonal.
    - If :attr:`offset` > 0, it is above the main diagonal.
    - If :attr:`offset` < 0, it is below the main diagonal.

    Args:
        input (Tensor): the input tensor.
        offset (int, optional): the diagonal to consider. Default: 0 (main
            diagonal).

    Examples::

        >>> a = torch.randn(3)
        >>> a
        tensor([-0.2956, -0.9068, 0.1695])
        >>> torch.diagflat(a)
        tensor([[-0.2956, 0.0000, 0.0000],
                [ 0.0000, -0.9068, 0.0000],
                [ 0.0000, 0.0000, 0.1695]])
        >>> torch.diagflat(a, 1)
        tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
                [ 0.0000, 0.0000, -0.9068, 0.0000],
                [ 0.0000, 0.0000, 0.0000, 0.1695],
                [ 0.0000, 0.0000, 0.0000, 0.0000]])
        >>> a = torch.randn(2, 2)
        >>> a
        tensor([[ 0.2094, -0.3018],
                [-0.1516, 1.9342]])
        >>> torch.diagflat(a)
        tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
                [ 0.0000, -0.3018, 0.0000, 0.0000],
                [ 0.0000, 0.0000, -0.1516, 0.0000],
                [ 0.0000, 0.0000, 0.0000, 1.9342]])
    """
    ...
@overload
def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
    r"""
    diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor

    Returns a partial view of :attr:`input` with its diagonal elements
    with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
    at the end of the shape.

    The argument :attr:`offset` controls which diagonal to consider:

    - If :attr:`offset` = 0, it is the main diagonal.
    - If :attr:`offset` > 0, it is above the main diagonal.
    - If :attr:`offset` < 0, it is below the main diagonal.

    Applying :meth:`torch.diag_embed` to the output of this function with
    the same arguments yields a diagonal matrix with the diagonal entries
    of the input. However, :meth:`torch.diag_embed` has different default
    dimensions, so those need to be explicitly specified.

    Args:
        input (Tensor): the input tensor. Must be at least 2-dimensional.
        offset (int, optional): which diagonal to consider. Default: 0
            (main diagonal).
        dim1 (int, optional): first dimension with respect to which to
            take diagonal. Default: 0.
        dim2 (int, optional): second dimension with respect to which to
            take diagonal. Default: 1.

    .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.

    Examples::

        >>> a = torch.randn(3, 3)
        >>> a
        tensor([[-1.0854, 1.1431, -0.1752],
                [ 0.8536, -0.0905, 0.0360],
                [ 0.6927, -0.3735, -0.4945]])
        >>> torch.diagonal(a, 0)
        tensor([-1.0854, -0.0905, -0.4945])
        >>> torch.diagonal(a, 1)
        tensor([ 1.1431, 0.0360])
        >>> x = torch.randn(2, 5, 4, 2)
        >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
        tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
                 [-1.1065, 1.0401, -0.2235, -0.7938]],
                [[-1.7325, -0.3081, 0.6166, 0.2335],
                 [ 1.0500, 0.7336, -0.3836, -1.1015]]])
    """
    ...
@overload
def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor:
    r"""
    diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor

    Returns a partial view of :attr:`input` with its diagonal elements
    with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
    at the end of the shape.

    The argument :attr:`offset` controls which diagonal to consider:

    - If :attr:`offset` = 0, it is the main diagonal.
    - If :attr:`offset` > 0, it is above the main diagonal.
    - If :attr:`offset` < 0, it is below the main diagonal.

    Applying :meth:`torch.diag_embed` to the output of this function with
    the same arguments yields a diagonal matrix with the diagonal entries
    of the input. However, :meth:`torch.diag_embed` has different default
    dimensions, so those need to be explicitly specified.

    Args:
        input (Tensor): the input tensor. Must be at least 2-dimensional.
        offset (int, optional): which diagonal to consider. Default: 0
            (main diagonal).
        dim1 (int, optional): first dimension with respect to which to
            take diagonal. Default: 0.
        dim2 (int, optional): second dimension with respect to which to
            take diagonal. Default: 1.

    .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.

    Examples::

        >>> a = torch.randn(3, 3)
        >>> a
        tensor([[-1.0854, 1.1431, -0.1752],
                [ 0.8536, -0.0905, 0.0360],
                [ 0.6927, -0.3735, -0.4945]])
        >>> torch.diagonal(a, 0)
        tensor([-1.0854, -0.0905, -0.4945])
        >>> torch.diagonal(a, 1)
        tensor([ 1.1431, 0.0360])
        >>> x = torch.randn(2, 5, 4, 2)
        >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
        tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
                 [-1.1065, 1.0401, -0.2235, -0.7938]],
                [[-1.7325, -0.3081, 0.6166, 0.2335],
                 [ 1.0500, 0.7336, -0.3836, -1.1015]]])
    """
    ...
def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.diagonal`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
    r"""
    diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor

    Embeds the values of the :attr:`src` tensor into :attr:`input` along
    the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
    and :attr:`dim2`.

    This function returns a tensor with fresh storage; it does not
    return a view.

    The argument :attr:`offset` controls which diagonal to consider:

    - If :attr:`offset` = 0, it is the main diagonal.
    - If :attr:`offset` > 0, it is above the main diagonal.
    - If :attr:`offset` < 0, it is below the main diagonal.

    Args:
        input (Tensor): the input tensor. Must be at least 2-dimensional.
        src (Tensor): the tensor to embed into :attr:`input`.
        offset (int, optional): which diagonal to consider. Default: 0
            (main diagonal).
        dim1 (int, optional): first dimension with respect to which to
            take diagonal. Default: 0.
        dim2 (int, optional): second dimension with respect to which to
            take diagonal. Default: 1.

    .. note::
        :attr:`src` must be of the proper size in order to be embedded
        into :attr:`input`. Specifically, it should have the same shape as
        ``torch.diagonal(input, offset, dim1, dim2)``

    Examples::

        >>> a = torch.zeros(3, 3)
        >>> a
        tensor([[0., 0., 0.],
                [0., 0., 0.],
                [0., 0., 0.]])
        >>> torch.diagonal_scatter(a, torch.ones(3), 0)
        tensor([[1., 0., 0.],
                [0., 1., 0.],
                [0., 0., 1.]])
        >>> torch.diagonal_scatter(a, torch.ones(2), 1)
        tensor([[0., 1., 0.],
                [0., 0., 1.],
                [0., 0., 0.]])
    """
    ...
def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor

    Computes the n-th forward difference along the given dimension.

    The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
    differences are calculated by using :func:`torch.diff` recursively.

    Args:
        input (Tensor): the tensor to compute the differences on
        n (int, optional): the number of times to recursively compute the difference
        dim (int, optional): the dimension to compute the difference along.
            Default is the last dimension.
        prepend, append (Tensor, optional): values to prepend or append to
            :attr:`input` along :attr:`dim` before computing the difference.
            Their dimensions must be equivalent to that of input, and their shapes
            must match input's shape except on :attr:`dim`.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([1, 3, 2])
        >>> torch.diff(a)
        tensor([ 2, -1])
        >>> b = torch.tensor([4, 5])
        >>> torch.diff(a, append=b)
        tensor([ 2, -1, 2, 1])
        >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
        >>> torch.diff(c, dim=0)
        tensor([[2, 2, 2]])
        >>> torch.diff(c, dim=1)
        tensor([[1, 1],
                [1, 1]])
    """
    ...
def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    digamma(input, *, out=None) -> Tensor

    Alias for :func:`torch.special.digamma`.
    """
    ...
def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor:
    r"""
    dist(input, other, p=2) -> Tensor

    Returns the p-norm of (:attr:`input` - :attr:`other`)

    The shapes of :attr:`input` and :attr:`other` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    Args:
        input (Tensor): the input tensor.
        other (Tensor): the right-hand-side input tensor
        p (float, optional): the norm to be computed

    Example::

        >>> x = torch.randn(4)
        >>> x
        tensor([-1.5393, -0.8675, 0.5916, 1.6321])
        >>> y = torch.randn(4)
        >>> y
        tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
        >>> torch.dist(x, y, 3.5)
        tensor(1.6727)
        >>> torch.dist(x, y, 3)
        tensor(1.6973)
        >>> torch.dist(x, y, 0)
        tensor(4.)
        >>> torch.dist(x, y, 1)
        tensor(2.6537)
    """
    ...
def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    div(input, other, *, rounding_mode=None, out=None) -> Tensor

    Divides each element of the input ``input`` by the corresponding element of
    :attr:`other`.

    .. math::
        \text{out}_i = \frac{\text{input}_i}{\text{other}_i}

    .. note::
        By default, this performs a "true" division like Python 3.
        See the :attr:`rounding_mode` argument for floor division.

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
    Always promotes integer types to the default scalar type.

    Args:
        input (Tensor): the dividend
        other (Tensor or Number): the divisor

    Keyword args:
        rounding_mode (str, optional): Type of rounding applied to the result:

            * None - default behavior. Performs no rounding and, if both :attr:`input` and
              :attr:`other` are integer types, promotes the inputs to the default scalar type.
              Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
            * ``"trunc"`` - rounds the results of the division towards zero.
              Equivalent to C-style integer division.
            * ``"floor"`` - rounds the results of the division down.
              Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.

        out (Tensor, optional): the output tensor.

    Examples::

        >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
        >>> torch.div(x, 0.5)
        tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
        >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
        ...                   [ 0.1815, -1.0111, 0.9805, -1.5923],
        ...                   [ 0.1062, 1.4581, 0.7759, -1.2344],
        ...                   [-0.1830, -0.0313, 1.1908, -1.4757]])
        >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
        >>> torch.div(a, b)
        tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
                [ 0.2260, -3.4509, -1.2086, 6.8990],
                [ 0.1322, 4.9764, -0.9564, 5.3484],
                [-0.2278, -0.1068, -1.4678, 6.3938]])
        >>> torch.div(a, b, rounding_mode='trunc')
        tensor([[-0., -6., 0., 1.],
                [ 0., -3., -1., 6.],
                [ 0., 4., -0., 5.],
                [-0., -0., -1., 6.]])
        >>> torch.div(a, b, rounding_mode='floor')
        tensor([[-1., -7., 0., 1.],
                [ 0., -4., -2., 6.],
                [ 0., 4., -1., 5.],
                [-1., -1., -2., 6.]])
    """
    ...
@overload
def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    divide(input, other, *, rounding_mode=None, out=None) -> Tensor

    Alias for :func:`torch.div`.
    """
    ...
@overload
def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor:
    r"""
    divide(input, other, *, rounding_mode=None, out=None) -> Tensor

    Alias for :func:`torch.div`.
    """
    ...
@overload
def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
    r"""
    divide(input, other, *, rounding_mode=None, out=None) -> Tensor

    Alias for :func:`torch.div`.
    """
    ...
@overload
def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor:
    r"""
    divide(input, other, *, rounding_mode=None, out=None) -> Tensor

    Alias for :func:`torch.div`.
    """
    ...
def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    dot(input, tensor, *, out=None) -> Tensor

    Computes the dot product of two 1D tensors.

    .. note::
        Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
        of two 1D tensors with the same number of elements.

    Args:
        input (Tensor): first tensor in the dot product, must be 1D.
        tensor (Tensor): second tensor in the dot product, must be 1D.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
        tensor(7)
        >>> t1, t2 = torch.tensor([0, 1]), torch.tensor([2, 3])
        >>> torch.dot(t1, t2)
        tensor(3)
    """
    ...
def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
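# A minimal sketch of the functional dropout pair above: surviving elements are
# scaled by 1/(1-p) during training, and train=False is the identity. The zero
# pattern shown is illustrative, since it is random:
# >>> x = torch.ones(5)
# >>> torch.dropout(x, p=0.5, train=True)
# tensor([2., 0., 2., 2., 0.])
# >>> torch.dropout(x, p=0.5, train=False)
# tensor([1., 1., 1., 1., 1.])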
def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
@overload
def dsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
    r"""
    dsplit(input, indices_or_sections) -> List of Tensors

    Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
    depthwise according to :attr:`indices_or_sections`. Each split is a view of
    :attr:`input`.

    This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
    (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
    it must evenly divide the split dimension or a runtime error will be thrown.

    This function is based on NumPy's :func:`numpy.dsplit`.

    Args:
        input (Tensor): tensor to split.
        indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.

    Example::

        >>> t = torch.arange(16.0).reshape(2, 2, 4)
        >>> t
        tensor([[[ 0., 1., 2., 3.],
                 [ 4., 5., 6., 7.]],
                [[ 8., 9., 10., 11.],
                 [12., 13., 14., 15.]]])
        >>> torch.dsplit(t, 2)
        (tensor([[[ 0., 1.],
                  [ 4., 5.]],
                 [[ 8., 9.],
                  [12., 13.]]]),
         tensor([[[ 2., 3.],
                  [ 6., 7.]],
                 [[10., 11.],
                  [14., 15.]]]))
        >>> torch.dsplit(t, [3, 6])
        (tensor([[[ 0., 1., 2.],
                  [ 4., 5., 6.]],
                 [[ 8., 9., 10.],
                  [12., 13., 14.]]]),
         tensor([[[ 3.],
                  [ 7.]],
                 [[11.],
                  [15.]]]),
         tensor([], size=(2, 2, 0)))
    """
    ...
  6166. @overload
  6167. def dsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
  6168. r"""
  6169. dsplit(input, indices_or_sections) -> List of Tensors
  6170. Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
  6171. depthwise according to :attr:`indices_or_sections`. Each split is a view of
  6172. :attr:`input`.
  6173. This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
  6174. (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
  6175. it must evenly divide the split dimension or a runtime error will be thrown.
  6176. This function is based on NumPy's :func:`numpy.dsplit`.
  6177. Args:
  6178. input (Tensor): tensor to split.
  6179. indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
  6180. Example::
  6181. >>> t = torch.arange(16.0).reshape(2, 2, 4)
  6182. >>> t
  6183. tensor([[[ 0., 1., 2., 3.],
  6184. [ 4., 5., 6., 7.]],
  6185. [[ 8., 9., 10., 11.],
  6186. [12., 13., 14., 15.]]])
  6187. >>> torch.dsplit(t, 2)
  6188. (tensor([[[ 0., 1.],
  6189. [ 4., 5.]],
  6190. [[ 8., 9.],
  6191. [12., 13.]]]),
  6192. tensor([[[ 2., 3.],
  6193. [ 6., 7.]],
  6194. [[10., 11.],
  6195. [14., 15.]]]))
  6196. >>> torch.dsplit(t, [3, 6])
  6197. (tensor([[[ 0., 1., 2.],
  6198. [ 4., 5., 6.]],
  6199. [[ 8., 9., 10.],
  6200. [12., 13., 14.]]]),
  6201. tensor([[[ 3.],
  6202. [ 7.]],
  6203. [[11.],
  6204. [15.]]]),
  6205. tensor([], size=(2, 2, 0)))
  6206. """
  6207. ...
  6208. def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
  6209. r"""
  6210. dstack(tensors, *, out=None) -> Tensor
  6211. Stack tensors in sequence depthwise (along third axis).
  6212. This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
  6213. Args:
  6214. tensors (sequence of Tensors): sequence of tensors to concatenate
  6215. Keyword args:
  6216. out (Tensor, optional): the output tensor.
  6217. Example::
  6218. >>> a = torch.tensor([1, 2, 3])
  6219. >>> b = torch.tensor([4, 5, 6])
  6220. >>> torch.dstack((a,b))
  6221. tensor([[[1, 4],
  6222. [2, 5],
  6223. [3, 6]]])
  6224. >>> a = torch.tensor([[1],[2],[3]])
  6225. >>> b = torch.tensor([[4],[5],[6]])
  6226. >>> torch.dstack((a,b))
  6227. tensor([[[1, 4]],
  6228. [[2, 5]],
  6229. [[3, 6]]])
  6230. """
  6231. ...
  6232. def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ...
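# `embedding` is undocumented in this stub. A minimal sketch of its behavior, assuming
# the lookup semantics of torch.nn.functional.embedding: each entry of `indices`
# selects the corresponding row of `weight`.
#   >>> weight = torch.randn(10, 3)          # 10 embeddings of dimension 3
#   >>> idx = torch.tensor([1, 4, 4])
#   >>> torch.embedding(weight, idx).shape   # rows 1, 4, 4 of `weight`
#   torch.Size([3, 3])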
@overload
def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
@overload
def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
    Returns a tensor filled with uninitialized data. The shape of the tensor is
    defined by the variable argument :attr:`size`.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.
    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.contiguous_format``.
    Example::
        >>> torch.empty((2,3), dtype=torch.int64)
        tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
                [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
    """
    ...
@overload
def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
    Returns a tensor filled with uninitialized data. The shape of the tensor is
    defined by the variable argument :attr:`size`.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.
    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.contiguous_format``.
    Example::
        >>> torch.empty((2,3), dtype=torch.int64)
        tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
                [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
    """
    ...
@overload
def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
    Returns a tensor filled with uninitialized data. The shape of the tensor is
    defined by the variable argument :attr:`size`.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.
    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.contiguous_format``.
    Example::
        >>> torch.empty((2,3), dtype=torch.int64)
        tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
                [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
    """
    ...
@overload
def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
    Returns a tensor filled with uninitialized data. The shape of the tensor is
    defined by the variable argument :attr:`size`.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.
    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.contiguous_format``.
    Example::
        >>> torch.empty((2,3), dtype=torch.int64)
        tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
                [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
    """
    ...
def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
    Returns an uninitialized tensor with the same size as :attr:`input`.
    ``torch.empty_like(input)`` is equivalent to
    ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        input (Tensor): the size of :attr:`input` will determine the size of the output tensor.
    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.
    Example::
        >>> a = torch.empty((2,3), dtype=torch.int32, device='cuda')
        >>> torch.empty_like(a)
        tensor([[0, 0, 0],
                [0, 0, 0]], device='cuda:0', dtype=torch.int32)
    """
    ...
def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
    Creates an uninitialized, non-overlapping and dense tensor with the
    specified :attr:`size`, with :attr:`physical_layout` specifying how the
    dimensions are physically laid out in memory (each logical dimension is listed
    from outermost to innermost). :attr:`physical_layout` is a generalization
    of NCHW/NHWC notation: if each dimension is assigned a number according to
    what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
    while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output
    tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
    (notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).
    Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
    tensor with no overlaps. If possible, prefer using this function over
    :func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        size (tuple of int): the shape of the output tensor
        physical_layout (tuple of int): the ordering of dimensions physically in memory
    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.
    Examples:
        >>> torch.empty((2, 3, 5, 7)).stride()
        (105, 35, 7, 1)
        >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
        (105, 35, 7, 1)
        >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
        (105, 1, 21, 3)
        >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
        (105, 1, 21, 3)
        >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order()
        (0, 2, 3, 1)
    """
    ...
def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
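# `empty_quantized` is undocumented here. Assuming it mirrors the other `empty*`
# factories, it returns an uninitialized quantized tensor of shape `size` that takes
# its quantization parameters (scale, zero point, dtype) from the reference quantized
# tensor `qtensor`; a hypothetical sketch:
#   >>> q = torch.quantize_per_tensor(torch.randn(2, 2), scale=0.1, zero_point=0, dtype=torch.qint8)
#   >>> torch.empty_quantized([2, 2], q)   # uninitialized, same quantizer as `q`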
def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
    Creates a tensor with the specified :attr:`size` and :attr:`stride`, filled with undefined data.
    .. warning::
        If the constructed tensor is "overlapped" (with multiple indices referring to the same element
        in memory) its behavior is undefined.
    .. note::
        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, the output tensor is initialized to prevent any possible
        nondeterministic behavior from using the data as an input to an operation.
        Floating point and complex tensors are filled with NaN, and integer tensors
        are filled with the maximum value.
    Args:
        size (tuple of int): the shape of the output tensor
        stride (tuple of int): the strides of the output tensor
    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.
    Example::
        >>> a = torch.empty_strided((2, 3), (1, 2))
        >>> a
        tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
                [0.0000e+00, 0.0000e+00, 3.0705e-41]])
        >>> a.stride()
        (1, 2)
        >>> a.size()
        torch.Size([2, 3])
    """
    ...
@overload
def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    eq(input, other, *, out=None) -> Tensor
    Computes element-wise equality.
    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.
    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare
    Keyword args:
        out (Tensor, optional): the output tensor.
    Returns:
        A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
    Example::
        >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[ True, False],
                [False,  True]])
    """
    ...
@overload
def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    eq(input, other, *, out=None) -> Tensor
    Computes element-wise equality.
    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.
    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare
    Keyword args:
        out (Tensor, optional): the output tensor.
    Returns:
        A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
    Example::
        >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[ True, False],
                [False,  True]])
    """
    ...
def equal(input: Tensor, other: Tensor) -> _bool:
    r"""
    equal(input, other) -> bool
    ``True`` if two tensors have the same size and elements, ``False`` otherwise.
    Example::
        >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
        True
    """
    ...
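# `torch.eq` and `torch.equal` are easy to confuse; a short contrast, restating only
# the documented behavior above:
#   >>> a, b = torch.tensor([1, 2]), torch.tensor([1, 3])
#   >>> torch.eq(a, b)        # elementwise, returns a bool tensor
#   tensor([ True, False])
#   >>> torch.equal(a, b)     # whole-tensor comparison, returns a Python bool
#   False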
def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    erf(input, *, out=None) -> Tensor
    Alias for :func:`torch.special.erf`.
    """
    ...
def erf_(input: Tensor) -> Tensor: ...
def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    erfc(input, *, out=None) -> Tensor
    Alias for :func:`torch.special.erfc`.
    """
    ...
def erfc_(input: Tensor) -> Tensor: ...
def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    erfinv(input, *, out=None) -> Tensor
    Alias for :func:`torch.special.erfinv`.
    """
    ...
def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    exp(input, *, out=None) -> Tensor
    Returns a new tensor with the exponential of the elements
    of the input tensor :attr:`input`.
    .. math::
        y_{i} = e^{x_{i}}
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> torch.exp(torch.tensor([0, math.log(2.)]))
        tensor([ 1.,  2.])
    """
    ...
def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    exp2(input, *, out=None) -> Tensor
    Alias for :func:`torch.special.exp2`.
    """
    ...
def exp2_(input: Tensor) -> Tensor: ...
def exp_(input: Tensor) -> Tensor: ...
def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :meth:`torch.Tensor.expand`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
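# A minimal example for `expand_copy`, since its docstring has none (a sketch; unlike
# Tensor.expand, the result owns its memory rather than aliasing the input's):
#   >>> x = torch.tensor([[1], [2], [3]])
#   >>> torch.expand_copy(x, (3, 4))   # same values as x.expand(3, 4), but a fresh tensor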
def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    expm1(input, *, out=None) -> Tensor
    Alias for :func:`torch.special.expm1`.
    """
    ...
def expm1_(input: Tensor) -> Tensor: ...
@overload
def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
    Args:
        n (int): the number of rows
        m (int, optional): the number of columns with default being :attr:`n`
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Returns:
        Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
    Example::
        >>> torch.eye(3)
        tensor([[ 1.,  0.,  0.],
                [ 0.,  1.,  0.],
                [ 0.,  0.,  1.]])
    """
    ...
@overload
def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
    Args:
        n (int): the number of rows
        m (int, optional): the number of columns with default being :attr:`n`
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Returns:
        Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
    Example::
        >>> torch.eye(3)
        tensor([[ 1.,  0.,  0.],
                [ 0.,  1.,  0.],
                [ 0.,  0.,  1.]])
    """
    ...
def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor:
    r"""
    fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
    Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
    :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
    .. math::
        \text{output} = (
            \min(
                \text{quant\_max},
                \max(
                    \text{quant\_min},
                    \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
                )
            ) - \text{zero\_point}
        ) \times \text{scale}
    Args:
        input (Tensor): the input value(s), in ``torch.float32``
        scale (Tensor): quantization scale, per channel in ``torch.float32``
        zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
        axis (int32): channel axis
        quant_min (int64): lower bound of the quantized domain
        quant_max (int64): upper bound of the quantized domain
    Returns:
        Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
    Example::
        >>> x = torch.randn(2, 2, 2)
        >>> x
        tensor([[[-0.2525, -0.0466],
                 [ 0.3491, -0.2168]],
                [[-0.5906,  1.6258],
                 [ 0.6444, -0.0542]]])
        >>> scales = (torch.randn(2) + 1) * 0.05
        >>> scales
        tensor([0.0475, 0.0486])
        >>> zero_points = torch.zeros(2).to(torch.int32)
        >>> zero_points
        tensor([0, 0])
        >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
        tensor([[[0.0000, 0.0000],
                 [0.3405, 0.0000]],
                [[0.0000, 1.6134],
                 [0.6323, 0.0000]]])
    """
    ...
@overload
def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor:
    r"""
    fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
    Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
    :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
    .. math::
        \text{output} = (
            \min(
                \text{quant\_max},
                \max(
                    \text{quant\_min},
                    \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
                )
            ) - \text{zero\_point}
        ) \times \text{scale}
    Args:
        input (Tensor): the input value(s), ``torch.float32`` tensor
        scale (double scalar or ``float32`` Tensor): quantization scale
        zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
        quant_min (int64): lower bound of the quantized domain
        quant_max (int64): upper bound of the quantized domain
    Returns:
        Tensor: A newly fake_quantized ``torch.float32`` tensor
    Example::
        >>> x = torch.randn(4)
        >>> x
        tensor([ 0.0552,  0.9730,  0.3973, -1.0780])
        >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
        tensor([0.1000, 1.0000, 0.4000, 0.0000])
        >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
        tensor([0.1000, 1.0000, 0.4000, 0.0000])
    """
    ...
@overload
def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor:
    r"""
    fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
    Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
    :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
    .. math::
        \text{output} = (
            \min(
                \text{quant\_max},
                \max(
                    \text{quant\_min},
                    \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
                )
            ) - \text{zero\_point}
        ) \times \text{scale}
    Args:
        input (Tensor): the input value(s), ``torch.float32`` tensor
        scale (double scalar or ``float32`` Tensor): quantization scale
        zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
        quant_min (int64): lower bound of the quantized domain
        quant_max (int64): upper bound of the quantized domain
    Returns:
        Tensor: A newly fake_quantized ``torch.float32`` tensor
    Example::
        >>> x = torch.randn(4)
        >>> x
        tensor([ 0.0552,  0.9730,  0.3973, -1.0780])
        >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
        tensor([0.1000, 1.0000, 0.4000, 0.0000])
        >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
        tensor([0.1000, 1.0000, 0.4000, 0.0000])
    """
    ...
def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
@overload
def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
@overload
def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
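# The feature_* dropout variants are undocumented here. Assuming the semantics of
# their torch.nn.functional counterparts: feature dropout zeroes whole feature maps
# (channels) rather than individual elements, and the alpha variants preserve the
# input's mean and variance (as used with SELU activations). A minimal sketch:
#   >>> x = torch.ones(1, 3, 2, 2)                    # N, C, H, W
#   >>> torch.feature_dropout(x, p=0.5, train=True)   # entire channels zeroed together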
@overload
def fill(input: Tensor, value: Tensor) -> Tensor: ...
@overload
def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
@overload
def fill_(input: Tensor, value: Tensor) -> Tensor: ...
@overload
def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    fix(input, *, out=None) -> Tensor
    Alias for :func:`torch.trunc`.
    """
    ...
def fix_(input: Tensor) -> Tensor: ...
@overload
def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor:
    r"""
    flatten(input, start_dim=0, end_dim=-1) -> Tensor
    Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
    are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
    The order of elements in :attr:`input` is unchanged.
    Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
    or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
    be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
    flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
    .. note::
        Flattening a zero-dimensional tensor will return a one-dimensional view.
    Args:
        input (Tensor): the input tensor.
        start_dim (int): the first dim to flatten
        end_dim (int): the last dim to flatten
    Example::
        >>> t = torch.tensor([[[1, 2],
        ...                    [3, 4]],
        ...                   [[5, 6],
        ...                    [7, 8]]])
        >>> torch.flatten(t)
        tensor([1, 2, 3, 4, 5, 6, 7, 8])
        >>> torch.flatten(t, start_dim=1)
        tensor([[1, 2, 3, 4],
                [5, 6, 7, 8]])
    """
    ...
@overload
def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor:
    r"""
    flatten(input, start_dim=0, end_dim=-1) -> Tensor
    Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
    are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
    The order of elements in :attr:`input` is unchanged.
    Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
    or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
    be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
    flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
    .. note::
        Flattening a zero-dimensional tensor will return a one-dimensional view.
    Args:
        input (Tensor): the input tensor.
        start_dim (int): the first dim to flatten
        end_dim (int): the last dim to flatten
    Example::
        >>> t = torch.tensor([[[1, 2],
        ...                    [3, 4]],
        ...                   [[5, 6],
        ...                    [7, 8]]])
        >>> torch.flatten(t)
        tensor([1, 2, 3, 4, 5, 6, 7, 8])
        >>> torch.flatten(t, start_dim=1)
        tensor([[1, 2, 3, 4],
                [5, 6, 7, 8]])
    """
    ...
@overload
def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor:
    r"""
    flatten(input, start_dim=0, end_dim=-1) -> Tensor
    Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
    are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
    The order of elements in :attr:`input` is unchanged.
    Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
    or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
    be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
    flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
    .. note::
        Flattening a zero-dimensional tensor will return a one-dimensional view.
    Args:
        input (Tensor): the input tensor.
        start_dim (int): the first dim to flatten
        end_dim (int): the last dim to flatten
    Example::
        >>> t = torch.tensor([[[1, 2],
        ...                    [3, 4]],
        ...                   [[5, 6],
        ...                    [7, 8]]])
        >>> torch.flatten(t)
        tensor([1, 2, 3, 4, 5, 6, 7, 8])
        >>> torch.flatten(t, start_dim=1)
        tensor([[1, 2, 3, 4],
                [5, 6, 7, 8]])
    """
    ...
@overload
def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor:
    r"""
    flatten(input, start_dim=0, end_dim=-1) -> Tensor
    Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
    are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
    The order of elements in :attr:`input` is unchanged.
    Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
    or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
    be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
    flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
    .. note::
        Flattening a zero-dimensional tensor will return a one-dimensional view.
    Args:
        input (Tensor): the input tensor.
        start_dim (int): the first dim to flatten
        end_dim (int): the last dim to flatten
    Example::
        >>> t = torch.tensor([[[1, 2],
        ...                    [3, 4]],
        ...                   [[5, 6],
        ...                    [7, 8]]])
        >>> torch.flatten(t)
        tensor([1, 2, 3, 4, 5, 6, 7, 8])
        >>> torch.flatten(t, start_dim=1)
        tensor([[1, 2, 3, 4],
                [5, 6, 7, 8]])
    """
    ...
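# The `flatten` overloads that take `out_dim` are the named-tensor forms, which the
# shared docstring above does not cover. A hedged sketch (named tensors are a
# prototype feature; the dimension names below are illustrative):
#   >>> t = torch.randn(2, 3, 4, names=('N', 'C', 'H'))
#   >>> torch.flatten(t, 'C', 'H', 'features').names   # flatten dims C..H into one named dim
#   ('N', 'features')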
  6937. def flip(input: Tensor, dims: _size) -> Tensor:
  6938. r"""
  6939. flip(input, dims) -> Tensor
  6940. Reverse the order of an n-D tensor along given axis in dims.
  6941. .. note::
  6942. `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
  6943. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  6944. `torch.flip` is expected to be slower than `np.flip`.
  6945. Args:
  6946. input (Tensor): the input tensor.
  6947. dims (a list or tuple): axis to flip on
  6948. Example::
  6949. >>> x = torch.arange(8).view(2, 2, 2)
  6950. >>> x
  6951. tensor([[[ 0, 1],
  6952. [ 2, 3]],
  6953. [[ 4, 5],
  6954. [ 6, 7]]])
  6955. >>> torch.flip(x, [0, 1])
  6956. tensor([[[ 6, 7],
  6957. [ 4, 5]],
  6958. [[ 2, 3],
  6959. [ 0, 1]]])
  6960. """
  6961. ...
  6962. def fliplr(input: Tensor) -> Tensor:
  6963. r"""
  6964. fliplr(input) -> Tensor
  6965. Flip tensor in the left/right direction, returning a new tensor.
  6966. Flip the entries in each row in the left/right direction.
  6967. Columns are preserved, but appear in a different order than before.
  6968. Note:
  6969. Requires the tensor to be at least 2-D.
  6970. .. note::
  6971. `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
  6972. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  6973. `torch.fliplr` is expected to be slower than `np.fliplr`.
  6974. Args:
  6975. input (Tensor): Must be at least 2-dimensional.
  6976. Example::
  6977. >>> x = torch.arange(4).view(2, 2)
  6978. >>> x
  6979. tensor([[0, 1],
  6980. [2, 3]])
  6981. >>> torch.fliplr(x)
  6982. tensor([[1, 0],
  6983. [3, 2]])
  6984. """
  6985. ...
  6986. def flipud(input: Tensor) -> Tensor:
  6987. r"""
  6988. flipud(input) -> Tensor
  6989. Flip tensor in the up/down direction, returning a new tensor.
  6990. Flip the entries in each column in the up/down direction.
  6991. Rows are preserved, but appear in a different order than before.
  6992. Note:
  6993. Requires the tensor to be at least 1-D.
  6994. .. note::
  6995. `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
  6996. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  6997. `torch.flipud` is expected to be slower than `np.flipud`.
  6998. Args:
  6999. input (Tensor): Must be at least 1-dimensional.
  7000. Example::
  7001. >>> x = torch.arange(4).view(2, 2)
  7002. >>> x
  7003. tensor([[0, 1],
  7004. [2, 3]])
  7005. >>> torch.flipud(x)
  7006. tensor([[2, 3],
  7007. [0, 1]])
  7008. """
  7009. ...
  7010. @overload
  7011. def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  7012. r"""
  7013. float_power(input, exponent, *, out=None) -> Tensor
  7014. Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
  7015. If neither input is complex returns a ``torch.float64`` tensor,
  7016. and if one or more inputs is complex returns a ``torch.complex128`` tensor.
  7017. .. note::
  7018. This function always computes in double precision, unlike :func:`torch.pow`,
  7019. which implements more typical :ref:`type promotion <type-promotion-doc>`.
  7020. This is useful when the computation needs to be performed in a wider or more precise dtype,
  7021. or the results of the computation may contain fractional values not representable in the input dtypes,
  7022. like when an integer base is raised to a negative integer exponent.
  7023. Args:
  7024. input (Tensor or Number): the base value(s)
  7025. exponent (Tensor or Number): the exponent value(s)
  7026. Keyword args:
  7027. out (Tensor, optional): the output tensor.
  7028. Example::
  7029. >>> a = torch.randint(10, (4,))
  7030. >>> a
  7031. tensor([6, 4, 7, 1])
  7032. >>> torch.float_power(a, 2)
  7033. tensor([36., 16., 49., 1.], dtype=torch.float64)
  7034. >>> a = torch.arange(1, 5)
  7035. >>> a
  7036. tensor([ 1, 2, 3, 4])
  7037. >>> exp = torch.tensor([2, -3, 4, -5])
  7038. >>> exp
  7039. tensor([ 2, -3, 4, -5])
  7040. >>> torch.float_power(a, exp)
  7041. tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
  7042. """
  7043. ...
  7044. @overload
  7045. def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  7046. r"""
  7047. float_power(input, exponent, *, out=None) -> Tensor
  7048. Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
  7049. If neither input is complex returns a ``torch.float64`` tensor,
  7050. and if one or more inputs is complex returns a ``torch.complex128`` tensor.
  7051. .. note::
  7052. This function always computes in double precision, unlike :func:`torch.pow`,
  7053. which implements more typical :ref:`type promotion <type-promotion-doc>`.
  7054. This is useful when the computation needs to be performed in a wider or more precise dtype,
  7055. or the results of the computation may contain fractional values not representable in the input dtypes,
  7056. like when an integer base is raised to a negative integer exponent.
  7057. Args:
  7058. input (Tensor or Number): the base value(s)
  7059. exponent (Tensor or Number): the exponent value(s)
  7060. Keyword args:
  7061. out (Tensor, optional): the output tensor.
  7062. Example::
  7063. >>> a = torch.randint(10, (4,))
  7064. >>> a
  7065. tensor([6, 4, 7, 1])
  7066. >>> torch.float_power(a, 2)
  7067. tensor([36., 16., 49., 1.], dtype=torch.float64)
  7068. >>> a = torch.arange(1, 5)
  7069. >>> a
  7070. tensor([ 1, 2, 3, 4])
  7071. >>> exp = torch.tensor([2, -3, 4, -5])
  7072. >>> exp
  7073. tensor([ 2, -3, 4, -5])
  7074. >>> torch.float_power(a, exp)
  7075. tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
  7076. """
  7077. ...
  7078. @overload
  7079. def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
  7080. r"""
  7081. float_power(input, exponent, *, out=None) -> Tensor
  7082. Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
  7083. If neither input is complex returns a ``torch.float64`` tensor,
  7084. and if one or more inputs is complex returns a ``torch.complex128`` tensor.
  7085. .. note::
  7086. This function always computes in double precision, unlike :func:`torch.pow`,
  7087. which implements more typical :ref:`type promotion <type-promotion-doc>`.
  7088. This is useful when the computation needs to be performed in a wider or more precise dtype,
  7089. or the results of the computation may contain fractional values not representable in the input dtypes,
  7090. like when an integer base is raised to a negative integer exponent.
  7091. Args:
  7092. input (Tensor or Number): the base value(s)
  7093. exponent (Tensor or Number): the exponent value(s)
  7094. Keyword args:
  7095. out (Tensor, optional): the output tensor.
  7096. Example::
  7097. >>> a = torch.randint(10, (4,))
  7098. >>> a
  7099. tensor([6, 4, 7, 1])
  7100. >>> torch.float_power(a, 2)
  7101. tensor([36., 16., 49., 1.], dtype=torch.float64)
  7102. >>> a = torch.arange(1, 5)
  7103. >>> a
  7104. tensor([ 1, 2, 3, 4])
  7105. >>> exp = torch.tensor([2, -3, 4, -5])
  7106. >>> exp
  7107. tensor([ 2, -3, 4, -5])
  7108. >>> torch.float_power(a, exp)
  7109. tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
  7110. """
  7111. ...
  7112. def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  7113. r"""
  7114. floor(input, *, out=None) -> Tensor
  7115. Returns a new tensor with the floor of the elements of :attr:`input`,
  7116. the largest integer less than or equal to each element.
  7117. For integer inputs, follows the array-api convention of returning a
  7118. copy of the input tensor.
  7119. .. math::
  7120. \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
  7121. Args:
  7122. input (Tensor): the input tensor.
  7123. Keyword args:
  7124. out (Tensor, optional): the output tensor.
  7125. Example::
  7126. >>> a = torch.randn(4)
  7127. >>> a
  7128. tensor([-0.8166, 1.5308, -0.2530, -0.2091])
  7129. >>> torch.floor(a)
  7130. tensor([-1., 1., -1., -1.])
  7131. """
  7132. ...
  7133. def floor_(input: Tensor) -> Tensor: ...
  7134. def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor:
  7135. r"""
  7136. floor_divide(input, other, *, out=None) -> Tensor
  7137. .. note::
  7138. Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
  7139. truncation division. To restore the previous behavior use
  7140. :func:`torch.div` with ``rounding_mode='trunc'``.
  7141. Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
  7142. the result.
  7143. .. math::
  7144. \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right)
  7145. Supports broadcasting to a common shape, type promotion, and integer and float inputs.
  7146. Args:
  7147. input (Tensor or Number): the dividend
  7148. other (Tensor or Number): the divisor
  7149. Keyword args:
  7150. out (Tensor, optional): the output tensor.
  7151. Example::
  7152. >>> a = torch.tensor([4.0, 3.0])
  7153. >>> b = torch.tensor([2.0, 2.0])
  7154. >>> torch.floor_divide(a, b)
  7155. tensor([2.0, 1.0])
  7156. >>> torch.floor_divide(a, 1.4)
  7157. tensor([2.0, 2.0])
  7158. """
  7159. ...
  7160. def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  7161. r"""
  7162. fmax(input, other, *, out=None) -> Tensor
  7163. Computes the element-wise maximum of :attr:`input` and :attr:`other`.
  7164. This is like :func:`torch.maximum` except it handles NaNs differently:
  7165. if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
  7166. Only if both elements are NaN is NaN propagated.
  7167. This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
  7168. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
  7169. :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
  7170. Args:
  7171. input (Tensor): the input tensor.
  7172. other (Tensor): the second input tensor
  7173. Keyword args:
  7174. out (Tensor, optional): the output tensor.
  7175. Example::
  7176. >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
  7177. >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
  7178. >>> torch.fmax(a, b)
  7179. tensor([9.7000, 0.5000, 3.1000, nan])
  7180. """
  7181. ...
  7182. def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  7183. r"""
  7184. fmin(input, other, *, out=None) -> Tensor
  7185. Computes the element-wise minimum of :attr:`input` and :attr:`other`.
  7186. This is like :func:`torch.minimum` except it handles NaNs differently:
  7187. if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
  7188. Only if both elements are NaN is NaN propagated.
  7189. This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
  7190. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
  7191. :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
  7192. Args:
  7193. input (Tensor): the input tensor.
  7194. other (Tensor): the second input tensor
  7195. Keyword args:
  7196. out (Tensor, optional): the output tensor.
  7197. Example::
  7198. >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
  7199. >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
  7200. >>> torch.fmin(a, b)
  7201. tensor([-9.3000, 0.1000, 2.1000, nan])
  7202. """
  7203. ...
@overload
def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    fmod(input, other, *, out=None) -> Tensor

    Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
    The result has the same sign as the dividend :attr:`input` and its absolute value
    is less than that of :attr:`other`.

    This function may be defined in terms of :func:`torch.div` as

    .. code:: python

        torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

    .. note::
        When the divisor is zero, returns ``NaN`` for floating point dtypes
        on both CPU and GPU; raises ``RuntimeError`` for integer division by
        zero on CPU; integer division by zero on GPU may return any value.

    .. note::
        Complex inputs are not supported. In some cases, it is not mathematically
        possible to satisfy the definition of a modulo operation with complex numbers.

    .. seealso::
        :func:`torch.remainder` which implements Python's modulus operator.
        That function is defined in terms of division that rounds down.

    Args:
        input (Tensor): the dividend
        other (Tensor or Scalar): the divisor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
        tensor([-1., -0., -1., 1., 0., 1.])
        >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
        tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
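
        >>> # Illustrative check (not from the upstream docstring) of the
        >>> # truncated-division identity quoted above:
        >>> a, b = torch.tensor([-3., -2, -1, 1, 2, 3]), 2
        >>> torch.equal(torch.fmod(a, b), a - a.div(b, rounding_mode="trunc") * b)
        True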
  7236. """
  7237. ...
@overload
def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    frac(input, *, out=None) -> Tensor

    Computes the fractional portion of each element in :attr:`input`.

    .. math::
        \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})

    Example::

        >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
        tensor([ 0.0000,  0.5000, -0.2000])
    """
    ...
def frac_(input: Tensor) -> Tensor: ...
def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp:
    r"""
    frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)

    Decomposes :attr:`input` into mantissa and exponent tensors
    such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.

    The range of mantissa is the open interval (-1, 1).

    Supports float inputs.

    Args:
        input (Tensor): the input tensor

    Keyword args:
        out (tuple, optional): the output tensors

    Example::

        >>> x = torch.arange(9.)
        >>> mantissa, exponent = torch.frexp(x)
        >>> mantissa
        tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
        >>> exponent
        tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
        >>> torch.ldexp(mantissa, exponent)
        tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
    """
    ...
def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False)

    Creates a CPU tensor with a storage backed by a memory-mapped file.

    If ``shared`` is True, then memory is shared between processes. All changes are written to the file.
    If ``shared`` is False, then changes to the tensor do not affect the file.

    ``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain
    at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True``, the file will be created if needed.

    .. note::
        Only CPU tensors can be mapped to files.

    .. note::
        For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory.

    Args:
        filename (str): file name to map
        shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
            underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
        size (int): number of elements in the tensor

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> t = torch.randn(2, 5, dtype=torch.float64)
        >>> t.numpy().tofile('storage.pt')
        >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64)
    """
    ...
def from_numpy(ndarray) -> Tensor:
    r"""
    from_numpy(ndarray) -> Tensor

    Creates a :class:`Tensor` from a :class:`numpy.ndarray`.

    The returned tensor and :attr:`ndarray` share the same memory. Modifications to
    the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
    tensor is not resizable.

    It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
    ``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
    ``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
    and ``bool``.

    .. warning::
        Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.

    Example::

        >>> a = numpy.array([1, 2, 3])
        >>> t = torch.from_numpy(a)
        >>> t
        tensor([ 1, 2, 3])
        >>> t[0] = -1
        >>> a
        array([-1, 2, 3])
    """
    ...
def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, requires_grad: _bool = False) -> Tensor:
    r"""
    frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor

    Creates a 1-dimensional :class:`Tensor` from an object that implements
    the Python buffer protocol.

    Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
    the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
    elements.

    Note that either of the following must be true:

    1. :attr:`count` is a positive non-zero number, and the total number of bytes
       in the buffer is more than :attr:`offset` plus :attr:`count` times the size
       (in bytes) of :attr:`dtype`.

    2. :attr:`count` is negative, and the length (number of bytes) of the buffer
       subtracted by the :attr:`offset` is a multiple of the size (in bytes) of
       :attr:`dtype`.

    The returned tensor and buffer share the same memory. Modifications to
    the tensor will be reflected in the buffer and vice versa. The returned
    tensor is not resizable.

    .. note::
        This function increments the reference count for the object that
        owns the shared memory. Therefore, such memory will not be deallocated
        before the returned tensor goes out of scope.

    .. warning::
        This function's behavior is undefined when passed an object implementing
        the buffer protocol whose data is not on the CPU. Doing so is likely to
        cause a segmentation fault.

    .. warning::
        This function does not try to infer the :attr:`dtype` (hence, it is not
        optional). Passing a different :attr:`dtype` than its source may result
        in unexpected behavior.

    Args:
        buffer (object): a Python object that exposes the buffer interface.

    Keyword args:
        dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        count (int, optional): the number of desired elements to be read.
            If negative, all the elements (until the end of the buffer) will be
            read. Default: -1.
        offset (int, optional): the number of bytes to skip at the start of
            the buffer. Default: 0.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> import array
        >>> a = array.array('i', [1, 2, 3])
        >>> t = torch.frombuffer(a, dtype=torch.int32)
        >>> t
        tensor([ 1, 2, 3])
        >>> t[0] = -1
        >>> a
        array([-1, 2, 3])

        >>> # Interprets the signed char bytes as 32-bit integers.
        >>> # Each 4 signed char elements will be interpreted as
        >>> # 1 signed 32-bit integer.
        >>> import array
        >>> a = array.array('b', [-1, 0, 0, 0])
        >>> torch.frombuffer(a, dtype=torch.int32)
        tensor([255], dtype=torch.int32)
    """
    ...
@overload
def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
    tensor's dtype is inferred from :attr:`fill_value`.

    Args:
        size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
            shape of the output tensor.
        fill_value (Scalar): the value to fill the output tensor with.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.full((2, 3), 3.141592)
        tensor([[ 3.1416, 3.1416, 3.1416],
                [ 3.1416, 3.1416, 3.1416]])
    """
    ...
@overload
def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: ...
@overload
def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    full_like(input, fill_value, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
    ``torch.full_like(input, fill_value)`` is equivalent to
    ``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.

    Args:
        input (Tensor): the size of :attr:`input` will determine size of the output tensor.
        fill_value: the number to fill the output tensor with.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.
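
    Example (an illustrative sketch, not part of the upstream docstring)::

        >>> x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
        >>> torch.full_like(x, 7.)
        tensor([[7., 7., 7.],
                [7., 7., 7.]])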
  7555. """
  7556. ...
def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ...
@overload
def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor

    Gathers values along an axis specified by `dim`.

    For a 3-D tensor the output is specified by::

        out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
        out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
        out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2

    :attr:`input` and :attr:`index` must have the same number of dimensions.
    It is also required that ``index.size(d) <= input.size(d)`` for all
    dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
    Note that ``input`` and ``index`` do not broadcast against each other.

    Args:
        input (Tensor): the source tensor
        dim (int): the axis along which to index
        index (LongTensor): the indices of elements to gather

    Keyword arguments:
        sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
        out (Tensor, optional): the destination tensor

    Example::

        >>> t = torch.tensor([[1, 2], [3, 4]])
        >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
        tensor([[ 1, 1],
                [ 4, 3]])
    """
    ...
@overload
def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    gcd(input, other, *, out=None) -> Tensor

    Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.

    Both :attr:`input` and :attr:`other` must have integer types.

    .. note::
        This defines :math:`gcd(0, 0) = 0`.

    Args:
        input (Tensor): the input tensor.
        other (Tensor): the second input tensor

    Keyword arguments:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([5, 10, 15])
        >>> b = torch.tensor([3, 4, 5])
        >>> torch.gcd(a, b)
        tensor([1, 2, 5])
        >>> c = torch.tensor([3])
        >>> torch.gcd(a, c)
        tensor([1, 1, 3])
    """
    ...
def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    ge(input, other, *, out=None) -> Tensor

    Computes :math:`\text{input} \geq \text{other}` element-wise.

    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.

    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere

    Example::

        >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[True, True], [False, True]])
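
        >>> # Illustrative (not from the upstream docstring): the second argument
        >>> # may also be a plain Python number, broadcast against the tensor.
        >>> torch.ge(torch.tensor([1, 2, 3]), 2)
        tensor([False,  True,  True])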
  7652. """
  7653. ...
@overload
def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf:
    r"""
    geqrf(input, *, out=None) -> (Tensor, Tensor)

    This is a low-level function for calling LAPACK's geqrf directly. This function
    returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .

    Computes a QR decomposition of :attr:`input`.
    Both `Q` and `R` matrices are stored in the same output tensor `a`.
    The elements of `R` are stored on and above the diagonal.
    Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
    are stored below the diagonal.

    The results of this function can be used together with :func:`torch.linalg.householder_product`
    to obtain the `Q` matrix or
    with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
    for an efficient matrix-matrix multiplication.

    See `LAPACK documentation for geqrf`_ for further details.

    .. note::
        See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
        with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.

    Args:
        input (Tensor): the input matrix

    Keyword args:
        out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
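
    Example (an illustrative sketch, not part of the upstream docstring; ``Q`` is
    recovered with :func:`torch.linalg.householder_product` and ``R`` is the upper
    triangle of ``a``)::

        >>> A = torch.randn(3, 3)
        >>> a, tau = torch.geqrf(A)
        >>> Q = torch.linalg.householder_product(a, tau)
        >>> R = a.triu()
        >>> torch.allclose(Q @ R, A)
        True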

    .. _LAPACK documentation for geqrf:
        http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
    """
    ...
def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    ger(input, vec2, *, out=None) -> Tensor

    Alias of :func:`torch.outer`.

    .. warning::
        This function is deprecated and will be removed in a future PyTorch release.
        Use :func:`torch.outer` instead.
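
    Example (an illustrative sketch, not part of the upstream docstring; the
    result is identical to :func:`torch.outer`)::

        >>> v1 = torch.arange(1., 5.)
        >>> v2 = torch.arange(1., 4.)
        >>> torch.ger(v1, v2)
        tensor([[  1.,   2.,   3.],
                [  2.,   4.,   6.],
                [  3.,   6.,   9.],
                [  4.,   8.,  12.]])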
  7706. """
  7707. ...
def get_default_dtype() -> _dtype:
    r"""
    get_default_dtype() -> torch.dtype

    Get the current default floating point :class:`torch.dtype`.

    Example::

        >>> torch.get_default_dtype()  # initial default for floating point is torch.float32
        torch.float32
        >>> torch.set_default_dtype(torch.float64)
        >>> torch.get_default_dtype()  # default is now changed to torch.float64
        torch.float64
    """
    ...
def get_num_interop_threads() -> _int:
    r"""
    get_num_interop_threads() -> int

    Returns the number of threads used for inter-op parallelism on CPU
    (e.g. in JIT interpreter)
    """
    ...
def get_num_threads() -> _int:
    r"""
    get_num_threads() -> int

    Returns the number of threads used for parallelizing CPU operations
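
    Example (illustrative, not part of the upstream docstring; the value reflects
    whatever was last set and the default is machine-dependent)::

        >>> torch.set_num_threads(4)
        >>> torch.get_num_threads()
        4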
  7731. """
  7732. ...
@overload
def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
    r"""
    gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors

    Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
    one or more dimensions using the `second-order accurate central differences method
    <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
    either first or second order estimates at the boundaries.

    The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
    specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
    to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
    :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
    :math:`g(1, 2, 3)\ == input[1, 2, 3]`.

    When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
    This is detailed in the "Keyword Arguments" section below.

    The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
    accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
    improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
    is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
    Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
    it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:

    .. math::
        \begin{aligned}
            f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
            f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
        \end{aligned}

    Using the fact that :math:`f \in C^3` and solving the linear system, we derive:

    .. math::
        f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
              + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }

    .. note::
        We estimate the gradient of functions in complex domain
        :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.

    The value of each partial derivative at the boundary points is computed differently. See edge_order below.

    Args:
        input (``Tensor``): the tensor that represents the values of the function

    Keyword args:
        spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
            how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
            the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
            indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
            indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
            Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
            the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
            the coordinates are (t0[1], t1[2], t2[3])

        dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
            the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
            the :attr:`spacing` argument must correspond with the specified dims.

        edge_order (``int``, optional): 1 or 2, for `first-order
            <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
            `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
            estimation of the boundary ("edge") values, respectively.

    Examples::

        >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
        >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
        >>> values = torch.tensor([4., 1., 1., 16.])
        >>> torch.gradient(values, spacing = coordinates)
        (tensor([-3., -2., 2., 5.]),)

        >>> # Estimates the gradient of the R^2 -> R function whose samples are
        >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
        >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
        >>> # partial derivative for both dimensions.
        >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
        >>> torch.gradient(t)
        (tensor([[ 9., 18., 36., 72.],
                 [ 9., 18., 36., 72.]]),
         tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
                 [10.0000, 15.0000, 30.0000, 40.0000]]))

        >>> # A scalar value for spacing modifies the relationship between tensor indices
        >>> # and input coordinates by multiplying the indices to find the
        >>> # coordinates. For example, below the indices of the innermost
        >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
        >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
        >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
        (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
                 [ 4.5000, 9.0000, 18.0000, 36.0000]]),
         tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
                 [ 5.0000, 7.5000, 15.0000, 20.0000]]))
        >>> # doubling the spacing between samples halves the estimated partial gradients.

        >>> # Estimates only the partial derivative for dimension 1
        >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
        (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
                 [10.0000, 15.0000, 30.0000, 40.0000]]),)

        >>> # When spacing is a list of scalars, the relationship between the tensor
        >>> # indices and input coordinates changes based on dimension.
        >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
        >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
        >>> # 0, 1 translate to coordinates of [0, 2].
        >>> torch.gradient(t, spacing = [3., 2.])
        (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
                 [ 4.5000, 9.0000, 18.0000, 36.0000]]),
         tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
                 [ 3.3333, 5.0000, 10.0000, 13.3333]]))

        >>> # The following example is a replication of the previous one with explicit
        >>> # coordinates.
        >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
        >>> torch.gradient(t, spacing = coords)
        (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
                 [ 4.5000, 9.0000, 18.0000, 36.0000]]),
         tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
                 [ 3.3333, 5.0000, 10.0000, 13.3333]]))
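
        >>> # Illustrative addition (not from the upstream docstring): with
        >>> # edge_order=2 the boundary estimates are second-order accurate,
        >>> # which recovers the exact derivative of f(x) = x^2 on a uniform grid.
        >>> x = torch.tensor([0., 1., 2., 3.])
        >>> torch.gradient(x**2, edge_order=2)
        (tensor([0., 2., 4., 6.]),)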
  7835. """
  7836. ...
@overload
def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: ...
@overload
def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: ...
@overload
def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: ...
@overload
def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first- or second-order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by computing each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` being its neighboring
points to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the indices are
multiplied by the corresponding scalar in each dimension. For example, if :attr:`spacing=(2, -1, 3)` the
indices (1, 2, 3) become coordinates (2, -2, 9). Finally, if :attr:`spacing` is a list of one-dimensional
tensors then each tensor specifies the coordinates for the corresponding dimension. For example, if the
indices are (1, 2, 3) and the tensors are (t0, t1, t2), then the coordinates are (t0[1], t1[2], t2[3]).
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivatives for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost dimension
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # Doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example replicates the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first- or second-order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by computing each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` being its neighboring
points to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the indices are
multiplied by the corresponding scalar in each dimension. For example, if :attr:`spacing=(2, -1, 3)` the
indices (1, 2, 3) become coordinates (2, -2, 9). Finally, if :attr:`spacing` is a list of one-dimensional
tensors then each tensor specifies the coordinates for the corresponding dimension. For example, if the
indices are (1, 2, 3) and the tensors are (t0, t1, t2), then the coordinates are (t0[1], t1[2], t2[3]).
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivatives for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost dimension
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # Doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example replicates the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first- or second-order estimates at the boundaries.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by computing each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` being its neighboring
points to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
\end{aligned}
Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in the complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the indices are
multiplied by the corresponding scalar in each dimension. For example, if :attr:`spacing=(2, -1, 3)` the
indices (1, 2, 3) become coordinates (2, -2, 9). Finally, if :attr:`spacing` is a list of one-dimensional
tensors then each tensor specifies the coordinates for the corresponding dimension. For example, if the
indices are (1, 2, 3) and the tensors are (t0, t1, t2), then the coordinates are (t0[1], t1[2], t2[3]).
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.])
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the function
>>> # estimates the partial derivatives for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost dimension
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # Doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example replicates the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
"""
...
@overload
def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
"""
...
@overload
def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
"""
...
@overload
def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
"""
...
@overload
def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
"""
...
def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ...
@overload
def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
@overload
def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere.
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
"""
...
@overload
def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere.
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
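>>> # Illustrative sketch (not in the original docs): with a scalar ``other``,
>>> # the comparison broadcasts over every element of ``input``.
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), 2)
tensor([[False, False], [ True,  True]])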
  8534. """
  8535. ...
  8536. @overload
def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of the returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
function. If False, returns a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of the returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of the returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
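Example::
>>> # Illustrative check (not in the original docs) of the periodic/symmetric
>>> # identity above, assuming the default alpha=0.54 and beta=0.46:
>>> torch.hamming_window(4, periodic=True)
tensor([0.0800, 0.5400, 1.0000, 0.5400])
>>> torch.hamming_window(5, periodic=False)[:-1]
tensor([0.0800, 0.5400, 1.0000, 0.5400])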
  8575. """
  8576. ...
  8577. @overload
def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of the returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
function. If False, returns a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of the returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of the returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of the returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
function. If False, returns a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of the returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of the returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
@overload
def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
Arguments:
window_length (int): the size of the returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
function. If False, returns a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of the returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of the returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
@overload
def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of the returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
function. If False, returns a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of the returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of the returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
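Example::
>>> # Illustrative check (not in the original docs): with window_length=4 and
>>> # the default periodic=True, :math:`N` is 5, so w[n] = sin^2(pi * n / 4):
>>> torch.hann_window(4)
tensor([0.0000, 0.5000, 1.0000, 0.5000])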
  8736. """
  8737. ...
  8738. @overload
def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
r"""
hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. The :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
Arguments:
window_length (int): the size of the returned window
periodic (bool, optional): If True, returns a window to be used as a periodic
function. If False, returns a symmetric window.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of the returned window tensor. Only
``torch.strided`` (dense layout) is supported.
device (:class:`torch.device`, optional): the desired device of the returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_device`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
Returns:
Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
"""
...
def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ...
def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
r"""
heaviside(input, values, *, out=None) -> Tensor
Computes the Heaviside step function for each element in :attr:`input`.
The Heaviside step function is defined as:
.. math::
\text{heaviside}(input, values) = \begin{cases}
0, & \text{if input < 0}\\
values, & \text{if input == 0}\\
1, & \text{if input > 0}
\end{cases}
Args:
input (Tensor): the input tensor.
values (Tensor): The values to use where :attr:`input` is zero.
Keyword arguments:
out (Tensor, optional): the output tensor.
Example::
>>> input = torch.tensor([-1.5, 0, 2.0])
>>> values = torch.tensor([0.5])
>>> torch.heaviside(input, values)
tensor([0.0000, 0.5000, 1.0000])
>>> values = torch.tensor([1.2, -2.0, 3.5])
>>> torch.heaviside(input, values)
tensor([0., -2., 1.])
"""
...
def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ...
def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor:
r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
Computes the histogram of a tensor.
The elements are sorted into equal-width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.
Elements lower than :attr:`min`, higher than :attr:`max`, and ``NaN`` elements are ignored.
Args:
input (Tensor): the input tensor.
bins (int): number of histogram bins
min (Scalar): lower end of the range (inclusive)
max (Scalar): upper end of the range (inclusive)
Keyword args:
out (Tensor, optional): the output tensor.
Returns:
Tensor: Histogram represented as a tensor
Example::
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
tensor([ 0., 2., 1., 0.])
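>>> # Illustrative sketch (not in the original docs): with the default min=0
>>> # and max=0, the range is taken from the data, here [1., 2.]:
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4)
tensor([2., 0., 0., 1.])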
  8824. """
  8825. ...
@overload
def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram:
r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
Computes a histogram of the values in a tensor.
:attr:`bins` can be an integer or a 1D tensor.
If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.
If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.
Args:
input (Tensor): the input tensor.
bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
defines the sequence of bin edges including the rightmost edge.
Keyword args:
range (tuple of float): Defines the range of the bins.
weight (Tensor): If provided, weight should have the same shape as input. Each value in
input contributes its associated weight towards its bin's result.
density (bool): If False, the result will contain the count (or total weight) in each bin.
If True, the result is the value of the probability density function over the bins,
normalized such that the integral over the range of the bins is 1.
out (tuple, optional): the result tuple of two output tensors (hist, bin_edges).
Returns:
hist (Tensor): 1D Tensor containing the values of the histogram.
bin_edges (Tensor): 1D Tensor containing the edges of the histogram bins.
Example::
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
(tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
(tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
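>>> # Hedged check (not in the original docs): each density value equals
>>> # (bin weight / total weight) / bin width; for the second bin above:
>>> round((5. / 7.) / 0.75, 4)
0.9524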
  8859. """
  8860. ...
  8861. @overload
def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram:
r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
Computes a histogram of the values in a tensor.
:attr:`bins` can be an integer or a 1D tensor.
If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.
If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.
Args:
input (Tensor): the input tensor.
bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
defines the sequence of bin edges including the rightmost edge.
Keyword args:
range (tuple of float): Defines the range of the bins.
weight (Tensor): If provided, weight should have the same shape as input. Each value in
input contributes its associated weight towards its bin's result.
density (bool): If False, the result will contain the count (or total weight) in each bin.
If True, the result is the value of the probability density function over the bins,
normalized such that the integral over the range of the bins is 1.
out (tuple, optional): the result tuple of two output tensors (hist, bin_edges).
Returns:
hist (Tensor): 1D Tensor containing the values of the histogram.
bin_edges (Tensor): 1D Tensor containing the edges of the histogram bins.
Example::
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
(tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
(tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
"""
...
@overload
def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
r"""
histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
Computes a multi-dimensional histogram of the values in a tensor.
Interprets the elements of an input tensor whose innermost dimension has size N
as a collection of N-dimensional points. Maps each of the points into a set of
N-dimensional bins and returns the number of points (or total weight) in each bin.
:attr:`input` must be a tensor with at least 2 dimensions.
If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
If input has three or more dimensions, all but the last dimension are flattened.
Each dimension is independently associated with its own strictly increasing sequence
of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
tensors. Alternatively, bin edges may be constructed automatically by passing a
sequence of integers specifying the number of equal-width bins in each dimension.
For each N-dimensional point in input:
- Each of its coordinates is binned independently among the bin edges
corresponding to its dimension
- Binning results are combined to identify the N-dimensional bin (if any)
into which the point falls
- If the point falls into a bin, the bin's count (or total weight) is incremented
- Points which do not fall into any bin do not contribute to the output
:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is exclusive of its left edge. Only
the rightmost bin is inclusive of its right edge.
If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
in each dimension. By default, the leftmost and rightmost bin edges in each dimension
are determined by the minimum and maximum elements of the input tensor in the
corresponding dimension. The :attr:`range` argument can be provided to manually
specify the leftmost and rightmost bin edges in each dimension.
If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
.. note::
See also :func:`torch.histogram`, which specifically computes 1D histograms.
While :func:`torch.histogramdd` infers the dimensionality of its bins and
binned values from the shape of :attr:`input`, :func:`torch.histogram`
accepts and flattens :attr:`input` of any shape.
Args:
input (Tensor): the input tensor.
bins: Tensor[], int[], or int.
If Tensor[], defines the sequences of bin edges.
If int[], defines the number of equal-width bins in each dimension.
If int, defines the number of equal-width bins for all dimensions.
Keyword args:
range (sequence of float): Defines the leftmost and rightmost bin edges
in each dimension.
weight (Tensor): By default, each value in the input has weight 1. If a weight
tensor is passed, each N-dimensional coordinate in input
contributes its associated weight towards its bin's result.
The weight tensor should have the same shape as the :attr:`input`
tensor excluding its innermost dimension N.
density (bool): If False (default), the result will contain the count (or total weight)
in each bin. If True, each count (weight) is divided by the total count
(total weight), then divided by the volume of its associated bin.
Returns:
hist (Tensor): N-dimensional Tensor containing the values of the histogram.
bin_edges (Tensor[]): sequence of N 1D Tensors containing the bin edges.
Example::
>>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
... weight=torch.tensor([1., 2., 4., 8.]))
torch.return_types.histogramdd(
hist=tensor([[0., 1., 0.],
[2., 0., 0.],
[4., 0., 8.]]),
bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
tensor([0.0000, 0.6667, 1.3333, 2.0000])))
>>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
... range=[0., 1., 0., 1.], density=True)
torch.return_types.histogramdd(
hist=tensor([[2., 0.],
[0., 2.]]),
bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 1.0000])))
  8971. """
  8972. ...
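# Usage sketch: per the docstring, a single int applies the same number of
# equal-width bins to every dimension.
# >>> pts = torch.tensor([[0., 0.], [1., 1.], [2., 0.], [2., 2.]])
# >>> hist, edges = torch.histogramdd(pts, bins=2)
# >>> hist.shape
# torch.Size([2, 2])
# >>> [e.numel() for e in edges]  # 2 bins per dimension -> 3 edges each
# [3, 3]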
  8973. @overload
  8974. def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
  8975. r"""
  8976. histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
  8977. Computes a multi-dimensional histogram of the values in a tensor.
  8978. Interprets the elements of an input tensor whose innermost dimension has size N
  8979. as a collection of N-dimensional points. Maps each of the points into a set of
  8980. N-dimensional bins and returns the number of points (or total weight) in each bin.
  8981. :attr:`input` must be a tensor with at least 2 dimensions.
  8982. If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
  8983. If input has three or more dimensions, all but the last dimension are flattened.
  8984. Each dimension is independently associated with its own strictly increasing sequence
  8985. of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
  8986. tensors. Alternatively, bin edges may be constructed automatically by passing a
  8987. sequence of integers specifying the number of equal-width bins in each dimension.
  8988. For each N-dimensional point in input:
  8989. - Each of its coordinates is binned independently among the bin edges
  8990. corresponding to its dimension
  8991. - Binning results are combined to identify the N-dimensional bin (if any)
  8992. into which the point falls
  8993. - If the point falls into a bin, the bin's count (or total weight) is incremented
  8994. - Points which do not fall into any bin do not contribute to the output
  8995. :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
  8996. If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
  8997. of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
  8998. least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
  8999. the left and right edges of all bins. Every bin is exclusive of its left edge. Only
  9000. the rightmost bin is inclusive of its right edge.
  9001. If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
  9002. in each dimension. By default, the leftmost and rightmost bin edges in each dimension
  9003. are determined by the minimum and maximum elements of the input tensor in the
  9004. corresponding dimension. The :attr:`range` argument can be provided to manually
  9005. specify the leftmost and rightmost bin edges in each dimension.
  9006. If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
  9007. .. note::
  9008. See also :func:`torch.histogram`, which specifically computes 1D histograms.
  9009. While :func:`torch.histogramdd` infers the dimensionality of its bins and
  9010. binned values from the shape of :attr:`input`, :func:`torch.histogram`
  9011. accepts and flattens :attr:`input` of any shape.
  9012. Args:
  9013. input (Tensor): the input tensor.
  9014. bins: Tensor[], int[], or int.
  9015. If Tensor[], defines the sequences of bin edges.
  9016. If int[], defines the number of equal-width bins in each dimension.
  9017. If int, defines the number of equal-width bins for all dimensions.
  9018. Keyword args:
  9019. range (sequence of float): Defines the leftmost and rightmost bin edges
  9020. in each dimension.
  9021. weight (Tensor): By default, each value in the input has weight 1. If a weight
  9022. tensor is passed, each N-dimensional coordinate in input
  9023. contributes its associated weight towards its bin's result.
  9024. The weight tensor should have the same shape as the :attr:`input`
  9025. tensor excluding its innermost dimension N.
  9026. density (bool): If False (default), the result will contain the count (or total weight)
  9027. in each bin. If True, each count (weight) is divided by the total count
  9028. (total weight), then divided by the volume of its associated bin.
  9029. Returns:
  9030. hist (Tensor): N-dimensional Tensor containing the values of the histogram.
9031. bin_edges (Tensor[]): sequence of N 1D Tensors containing the bin edges.
  9032. Example::
  9033. >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
  9034. ... weight=torch.tensor([1., 2., 4., 8.]))
  9035. torch.return_types.histogramdd(
  9036. hist=tensor([[0., 1., 0.],
  9037. [2., 0., 0.],
  9038. [4., 0., 8.]]),
  9039. bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
  9040. tensor([0.0000, 0.6667, 1.3333, 2.0000])))
  9041. >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
  9042. ... range=[0., 1., 0., 1.], density=True)
  9043. torch.return_types.histogramdd(
  9044. hist=tensor([[2., 0.],
  9045. [0., 2.]]),
  9046. bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
  9047. tensor([0.0000, 0.5000, 1.0000])))
  9048. """
  9049. ...
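# Usage sketch: a sequence of ints with an explicit range; as in the example
# above, the flat range sequence supplies (leftmost, rightmost) per dimension.
# >>> pts = torch.rand(100, 3)  # all values in [0, 1)
# >>> hist, edges = torch.histogramdd(pts, bins=[4, 4, 2],
# ...                                 range=[0., 1., 0., 1., 0., 1.])
# >>> hist.shape
# torch.Size([4, 4, 2])
# >>> hist.sum()  # every point falls inside the given range
# tensor(100.)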
  9050. @overload
  9051. def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
  9052. r"""
  9053. histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
  9054. Computes a multi-dimensional histogram of the values in a tensor.
  9055. Interprets the elements of an input tensor whose innermost dimension has size N
  9056. as a collection of N-dimensional points. Maps each of the points into a set of
  9057. N-dimensional bins and returns the number of points (or total weight) in each bin.
  9058. :attr:`input` must be a tensor with at least 2 dimensions.
  9059. If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
  9060. If input has three or more dimensions, all but the last dimension are flattened.
  9061. Each dimension is independently associated with its own strictly increasing sequence
  9062. of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
  9063. tensors. Alternatively, bin edges may be constructed automatically by passing a
  9064. sequence of integers specifying the number of equal-width bins in each dimension.
  9065. For each N-dimensional point in input:
  9066. - Each of its coordinates is binned independently among the bin edges
  9067. corresponding to its dimension
  9068. - Binning results are combined to identify the N-dimensional bin (if any)
  9069. into which the point falls
  9070. - If the point falls into a bin, the bin's count (or total weight) is incremented
  9071. - Points which do not fall into any bin do not contribute to the output
  9072. :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
  9073. If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
  9074. of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
  9075. least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
  9076. the left and right edges of all bins. Every bin is exclusive of its left edge. Only
  9077. the rightmost bin is inclusive of its right edge.
  9078. If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
  9079. in each dimension. By default, the leftmost and rightmost bin edges in each dimension
  9080. are determined by the minimum and maximum elements of the input tensor in the
  9081. corresponding dimension. The :attr:`range` argument can be provided to manually
  9082. specify the leftmost and rightmost bin edges in each dimension.
  9083. If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
  9084. .. note::
  9085. See also :func:`torch.histogram`, which specifically computes 1D histograms.
  9086. While :func:`torch.histogramdd` infers the dimensionality of its bins and
  9087. binned values from the shape of :attr:`input`, :func:`torch.histogram`
  9088. accepts and flattens :attr:`input` of any shape.
  9089. Args:
  9090. input (Tensor): the input tensor.
  9091. bins: Tensor[], int[], or int.
  9092. If Tensor[], defines the sequences of bin edges.
  9093. If int[], defines the number of equal-width bins in each dimension.
  9094. If int, defines the number of equal-width bins for all dimensions.
  9095. Keyword args:
  9096. range (sequence of float): Defines the leftmost and rightmost bin edges
  9097. in each dimension.
  9098. weight (Tensor): By default, each value in the input has weight 1. If a weight
  9099. tensor is passed, each N-dimensional coordinate in input
  9100. contributes its associated weight towards its bin's result.
  9101. The weight tensor should have the same shape as the :attr:`input`
  9102. tensor excluding its innermost dimension N.
  9103. density (bool): If False (default), the result will contain the count (or total weight)
  9104. in each bin. If True, each count (weight) is divided by the total count
  9105. (total weight), then divided by the volume of its associated bin.
  9106. Returns:
  9107. hist (Tensor): N-dimensional Tensor containing the values of the histogram.
9108. bin_edges (Tensor[]): sequence of N 1D Tensors containing the bin edges.
  9109. Example::
  9110. >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
  9111. ... weight=torch.tensor([1., 2., 4., 8.]))
  9112. torch.return_types.histogramdd(
  9113. hist=tensor([[0., 1., 0.],
  9114. [2., 0., 0.],
  9115. [4., 0., 8.]]),
  9116. bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
  9117. tensor([0.0000, 0.6667, 1.3333, 2.0000])))
  9118. >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
  9119. ... range=[0., 1., 0., 1.], density=True)
  9120. torch.return_types.histogramdd(
  9121. hist=tensor([[2., 0.],
  9122. [0., 2.]]),
  9123. bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
  9124. tensor([0.0000, 0.5000, 1.0000])))
  9125. """
  9126. ...
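# Usage sketch: explicit per-dimension bin edges. Neither point lies on an
# edge, so each lands in exactly one bin.
# >>> edges = [torch.tensor([0., 1., 2.]), torch.tensor([0., 0.5, 1.])]
# >>> pts = torch.tensor([[0.5, 0.25], [1.5, 0.75]])
# >>> hist, _ = torch.histogramdd(pts, bins=edges)
# >>> hist
# tensor([[1., 0.],
#         [0., 1.]])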
  9127. def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
  9128. @overload
  9129. def hsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
  9130. r"""
  9131. hsplit(input, indices_or_sections) -> List of Tensors
  9132. Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
  9133. horizontally according to :attr:`indices_or_sections`. Each split is a view of
  9134. :attr:`input`.
  9135. If :attr:`input` is one dimensional this is equivalent to calling
  9136. torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
9137. 0), and if :attr:`input` has two or more dimensions it's equivalent to calling
  9138. torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
  9139. except that if :attr:`indices_or_sections` is an integer it must evenly divide
  9140. the split dimension or a runtime error will be thrown.
  9141. This function is based on NumPy's :func:`numpy.hsplit`.
  9142. Args:
  9143. input (Tensor): tensor to split.
  9144. indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
  9145. Example::
  9146. >>> t = torch.arange(16.0).reshape(4,4)
  9147. >>> t
  9148. tensor([[ 0., 1., 2., 3.],
  9149. [ 4., 5., 6., 7.],
  9150. [ 8., 9., 10., 11.],
  9151. [12., 13., 14., 15.]])
  9152. >>> torch.hsplit(t, 2)
  9153. (tensor([[ 0., 1.],
  9154. [ 4., 5.],
  9155. [ 8., 9.],
  9156. [12., 13.]]),
  9157. tensor([[ 2., 3.],
  9158. [ 6., 7.],
  9159. [10., 11.],
  9160. [14., 15.]]))
  9161. >>> torch.hsplit(t, [3, 6])
  9162. (tensor([[ 0., 1., 2.],
  9163. [ 4., 5., 6.],
  9164. [ 8., 9., 10.],
  9165. [12., 13., 14.]]),
  9166. tensor([[ 3.],
  9167. [ 7.],
  9168. [11.],
  9169. [15.]]),
  9170. tensor([], size=(4, 0)))
  9171. """
  9172. ...
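# Usage sketch: for a 1D tensor the split happens along dim 0.
# >>> v = torch.arange(6)
# >>> torch.hsplit(v, 3)
# (tensor([0, 1]), tensor([2, 3]), tensor([4, 5]))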
  9173. @overload
  9174. def hsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
  9175. r"""
  9176. hsplit(input, indices_or_sections) -> List of Tensors
  9177. Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
  9178. horizontally according to :attr:`indices_or_sections`. Each split is a view of
  9179. :attr:`input`.
  9180. If :attr:`input` is one dimensional this is equivalent to calling
  9181. torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
9182. 0), and if :attr:`input` has two or more dimensions it's equivalent to calling
  9183. torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
  9184. except that if :attr:`indices_or_sections` is an integer it must evenly divide
  9185. the split dimension or a runtime error will be thrown.
  9186. This function is based on NumPy's :func:`numpy.hsplit`.
  9187. Args:
  9188. input (Tensor): tensor to split.
  9189. indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
  9190. Example::
  9191. >>> t = torch.arange(16.0).reshape(4,4)
  9192. >>> t
  9193. tensor([[ 0., 1., 2., 3.],
  9194. [ 4., 5., 6., 7.],
  9195. [ 8., 9., 10., 11.],
  9196. [12., 13., 14., 15.]])
  9197. >>> torch.hsplit(t, 2)
  9198. (tensor([[ 0., 1.],
  9199. [ 4., 5.],
  9200. [ 8., 9.],
  9201. [12., 13.]]),
  9202. tensor([[ 2., 3.],
  9203. [ 6., 7.],
  9204. [10., 11.],
  9205. [14., 15.]]))
  9206. >>> torch.hsplit(t, [3, 6])
  9207. (tensor([[ 0., 1., 2.],
  9208. [ 4., 5., 6.],
  9209. [ 8., 9., 10.],
  9210. [12., 13., 14.]]),
  9211. tensor([[ 3.],
  9212. [ 7.],
  9213. [11.],
  9214. [15.]]),
  9215. tensor([], size=(4, 0)))
  9216. """
  9217. ...
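# Usage sketch: for inputs with three or more dimensions the split is still
# along dim 1.
# >>> t = torch.arange(8.).reshape(2, 2, 2)
# >>> [s.shape for s in torch.hsplit(t, 2)]
# [torch.Size([2, 1, 2]), torch.Size([2, 1, 2])]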
  9218. def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9219. r"""
  9220. hspmm(mat1, mat2, *, out=None) -> Tensor
  9221. Performs a matrix multiplication of a :ref:`sparse COO matrix
  9222. <sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
  9223. result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
  9224. <sparse-hybrid-coo-docs>`.
  9225. Args:
  9226. mat1 (Tensor): the first sparse matrix to be matrix multiplied
  9227. mat2 (Tensor): the second strided matrix to be matrix multiplied
  9228. Keyword args:
  9229. out (Tensor, optional): the output tensor.
  9230. """
  9231. ...
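# A minimal sketch (shapes only; the dense factor is random): the result is a
# sparse hybrid tensor with the shape of an ordinary matmul.
# >>> s = torch.eye(3).to_sparse()
# >>> d = torch.randn(3, 2)
# >>> r = torch.hspmm(s, d)
# >>> r.is_sparse, r.shape
# (True, torch.Size([3, 2]))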
  9232. def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
  9233. r"""
  9234. hstack(tensors, *, out=None) -> Tensor
  9235. Stack tensors in sequence horizontally (column wise).
  9236. This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
  9237. Args:
  9238. tensors (sequence of Tensors): sequence of tensors to concatenate
  9239. Keyword args:
  9240. out (Tensor, optional): the output tensor.
  9241. Example::
  9242. >>> a = torch.tensor([1, 2, 3])
  9243. >>> b = torch.tensor([4, 5, 6])
  9244. >>> torch.hstack((a,b))
  9245. tensor([1, 2, 3, 4, 5, 6])
  9246. >>> a = torch.tensor([[1],[2],[3]])
  9247. >>> b = torch.tensor([[4],[5],[6]])
  9248. >>> torch.hstack((a,b))
  9249. tensor([[1, 4],
  9250. [2, 5],
  9251. [3, 6]])
  9252. """
  9253. ...
  9254. def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9255. r"""
  9256. hypot(input, other, *, out=None) -> Tensor
  9257. Given the legs of a right triangle, return its hypotenuse.
  9258. .. math::
  9259. \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
  9260. The shapes of ``input`` and ``other`` must be
  9261. :ref:`broadcastable <broadcasting-semantics>`.
  9262. Args:
  9263. input (Tensor): the first input tensor
  9264. other (Tensor): the second input tensor
  9265. Keyword args:
  9266. out (Tensor, optional): the output tensor.
  9267. Example::
9268. >>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
  9269. tensor([5.0000, 5.6569, 6.4031])
  9270. """
  9271. ...
  9272. def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9273. r"""
  9274. i0(input, *, out=None) -> Tensor
  9275. Alias for :func:`torch.special.i0`.
  9276. """
  9277. ...
  9278. def i0_(input: Tensor) -> Tensor: ...
  9279. def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9280. r"""
  9281. igamma(input, other, *, out=None) -> Tensor
  9282. Alias for :func:`torch.special.gammainc`.
  9283. """
  9284. ...
  9285. def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9286. r"""
  9287. igammac(input, other, *, out=None) -> Tensor
  9288. Alias for :func:`torch.special.gammaincc`.
  9289. """
  9290. ...
  9291. def imag(input: Tensor) -> Tensor:
  9292. r"""
  9293. imag(input) -> Tensor
  9294. Returns a new tensor containing imaginary values of the :attr:`self` tensor.
  9295. The returned tensor and :attr:`self` share the same underlying storage.
  9296. .. warning::
  9297. :func:`imag` is only supported for tensors with complex dtypes.
  9298. Args:
  9299. input (Tensor): the input tensor.
  9300. Example::
  9301. >>> x=torch.randn(4, dtype=torch.cfloat)
  9302. >>> x
  9303. tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
  9304. >>> x.imag
  9305. tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
  9306. """
  9307. ...
  9308. @overload
  9309. def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
  9310. r"""
  9311. index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
  9312. See :meth:`~Tensor.index_add_` for function description.
  9313. """
  9314. ...
  9315. @overload
  9316. def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
  9317. r"""
  9318. index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
  9319. See :meth:`~Tensor.index_add_` for function description.
  9320. """
  9321. ...
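# Usage sketch: rows of source are scaled by alpha and accumulated into the
# rows of input selected by index.
# >>> x = torch.zeros(3, 3)
# >>> index = torch.tensor([0, 2])
# >>> source = torch.ones(2, 3)
# >>> torch.index_add(x, 0, index, source, alpha=2)
# tensor([[2., 2., 2.],
#         [0., 0., 0.],
#         [2., 2., 2.]])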
  9322. @overload
  9323. def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9324. r"""
  9325. index_copy(input, dim, index, source, *, out=None) -> Tensor
9326. See :meth:`~Tensor.index_copy_` for function description.
  9327. """
  9328. ...
  9329. @overload
  9330. def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
  9331. r"""
  9332. index_copy(input, dim, index, source, *, out=None) -> Tensor
9333. See :meth:`~Tensor.index_copy_` for function description.
  9334. """
  9335. ...
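# Usage sketch: unlike index_add, the selected rows are overwritten rather
# than accumulated.
# >>> x = torch.zeros(3, 3)
# >>> index = torch.tensor([0, 2])
# >>> source = torch.arange(6.).reshape(2, 3)
# >>> torch.index_copy(x, 0, index, source)
# tensor([[0., 1., 2.],
#         [0., 0., 0.],
#         [3., 4., 5.]])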
  9336. @overload
  9337. def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
  9338. @overload
  9339. def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
  9340. @overload
  9341. def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
  9342. @overload
  9343. def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
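# Usage sketch: fills the entries selected by index along dim with a scalar
# value, out of place.
# >>> x = torch.ones(2, 3)
# >>> torch.index_fill(x, 1, torch.tensor([0, 2]), -1.)
# tensor([[-1.,  1., -1.],
#         [-1.,  1., -1.]])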
  9344. def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
  9345. def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
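# Usage sketch: indices is a tuple of index tensors, one per indexed
# dimension; this is the out-of-place form of ``x[rows, cols] = values``.
# >>> x = torch.zeros(2, 2)
# >>> rows, cols = torch.tensor([0, 1]), torch.tensor([1, 0])
# >>> torch.index_put(x, (rows, cols), torch.tensor([1., 2.]))
# tensor([[0., 1.],
#         [2., 0.]])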
  9346. def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
  9347. r"""
  9348. index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor
  9349. See :meth:`~Tensor.index_reduce_` for function description.
  9350. """
  9351. ...
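# Usage sketch: with reduce='prod' and include_self=True (the default), the
# existing values of input take part in the reduction.
# >>> x = torch.ones(3, 2)
# >>> index = torch.tensor([0, 0, 1])
# >>> source = torch.tensor([[2., 2.], [3., 3.], [4., 4.]])
# >>> torch.index_reduce(x, 0, index, source, 'prod')
# tensor([[6., 6.],
#         [4., 4.],
#         [1., 1.]])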
  9352. @overload
  9353. def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9354. r"""
  9355. index_select(input, dim, index, *, out=None) -> Tensor
  9356. Returns a new tensor which indexes the :attr:`input` tensor along dimension
  9357. :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
  9358. The returned tensor has the same number of dimensions as the original tensor
  9359. (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
  9360. of :attr:`index`; other dimensions have the same size as in the original tensor.
  9361. .. note:: The returned tensor does **not** use the same storage as the original
  9362. tensor. If :attr:`out` has a different shape than expected, we
  9363. silently change it to the correct shape, reallocating the underlying
  9364. storage if necessary.
  9365. Args:
  9366. input (Tensor): the input tensor.
  9367. dim (int): the dimension in which we index
  9368. index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
  9369. Keyword args:
  9370. out (Tensor, optional): the output tensor.
  9371. Example::
  9372. >>> x = torch.randn(3, 4)
  9373. >>> x
  9374. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  9375. [-0.4664, 0.2647, -0.1228, -1.1068],
  9376. [-1.1734, -0.6571, 0.7230, -0.6004]])
  9377. >>> indices = torch.tensor([0, 2])
  9378. >>> torch.index_select(x, 0, indices)
  9379. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  9380. [-1.1734, -0.6571, 0.7230, -0.6004]])
  9381. >>> torch.index_select(x, 1, indices)
  9382. tensor([[ 0.1427, -0.5414],
  9383. [-0.4664, -0.1228],
  9384. [-1.1734, 0.7230]])
  9385. """
  9386. ...
  9387. @overload
  9388. def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9389. r"""
  9390. index_select(input, dim, index, *, out=None) -> Tensor
  9391. Returns a new tensor which indexes the :attr:`input` tensor along dimension
  9392. :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
  9393. The returned tensor has the same number of dimensions as the original tensor
  9394. (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
  9395. of :attr:`index`; other dimensions have the same size as in the original tensor.
  9396. .. note:: The returned tensor does **not** use the same storage as the original
  9397. tensor. If :attr:`out` has a different shape than expected, we
  9398. silently change it to the correct shape, reallocating the underlying
  9399. storage if necessary.
  9400. Args:
  9401. input (Tensor): the input tensor.
  9402. dim (int): the dimension in which we index
  9403. index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
  9404. Keyword args:
  9405. out (Tensor, optional): the output tensor.
  9406. Example::
  9407. >>> x = torch.randn(3, 4)
  9408. >>> x
  9409. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  9410. [-0.4664, 0.2647, -0.1228, -1.1068],
  9411. [-1.1734, -0.6571, 0.7230, -0.6004]])
  9412. >>> indices = torch.tensor([0, 2])
  9413. >>> torch.index_select(x, 0, indices)
  9414. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  9415. [-1.1734, -0.6571, 0.7230, -0.6004]])
  9416. >>> torch.index_select(x, 1, indices)
  9417. tensor([[ 0.1427, -0.5414],
  9418. [-0.4664, -0.1228],
  9419. [-1.1734, 0.7230]])
  9420. """
  9421. ...
  9422. def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9423. r"""
  9424. Performs the same operation as :func:`torch.indices`, but all output tensors
  9425. are freshly created instead of aliasing the input.
  9426. """
  9427. ...
  9428. def init_num_threads() -> None: ...
  9429. def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9430. r"""
  9431. inner(input, other, *, out=None) -> Tensor
  9432. Computes the dot product for 1D tensors. For higher dimensions, sums the product
  9433. of elements from :attr:`input` and :attr:`other` along their last dimension.
  9434. .. note::
  9435. If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
  9436. to `torch.mul(input, other)`.
  9437. If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
  9438. dimension must match and the result is equivalent to `torch.tensordot(input,
  9439. other, dims=([-1], [-1]))`
  9440. Args:
  9441. input (Tensor): First input tensor
  9442. other (Tensor): Second input tensor
  9443. Keyword args:
  9444. out (Tensor, optional): Optional output tensor to write result into. The output
  9445. shape is `input.shape[:-1] + other.shape[:-1]`.
  9446. Example::
  9447. # Dot product
  9448. >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
  9449. tensor(7)
  9450. # Multidimensional input tensors
  9451. >>> a = torch.randn(2, 3)
  9452. >>> a
  9453. tensor([[0.8173, 1.0874, 1.1784],
  9454. [0.3279, 0.1234, 2.7894]])
  9455. >>> b = torch.randn(2, 4, 3)
  9456. >>> b
  9457. tensor([[[-0.4682, -0.7159, 0.1506],
  9458. [ 0.4034, -0.3657, 1.0387],
  9459. [ 0.9892, -0.6684, 0.1774],
  9460. [ 0.9482, 1.3261, 0.3917]],
  9461. [[ 0.4537, 0.7493, 1.1724],
  9462. [ 0.2291, 0.5749, -0.2267],
  9463. [-0.7920, 0.3607, -0.3701],
  9464. [ 1.3666, -0.5850, -1.7242]]])
  9465. >>> torch.inner(a, b)
  9466. tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
  9467. [ 2.5671, 0.5452, -0.6912, -1.5509]],
  9468. [[ 0.1782, 2.9843, 0.7366, 1.5672],
  9469. [ 3.5115, -0.4864, -1.2476, -4.4337]]])
  9470. # Scalar input
  9471. >>> torch.inner(a, torch.tensor(2))
  9472. tensor([[1.6347, 2.1748, 2.3567],
  9473. [0.6558, 0.2469, 5.5787]])
  9474. """
  9475. ...
  9476. def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
  9477. def int_repr(input: Tensor) -> Tensor: ...
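# Usage sketch: returns the underlying integer representation of a quantized
# tensor (here, 1.0 / scale 0.1 quantizes to 10).
# >>> q = torch.quantize_per_tensor(torch.tensor([0., 1.]), scale=0.1,
# ...                               zero_point=0, dtype=torch.quint8)
# >>> torch.int_repr(q)
# tensor([ 0, 10], dtype=torch.uint8)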
  9478. def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9479. r"""
  9480. inverse(input, *, out=None) -> Tensor
  9481. Alias for :func:`torch.linalg.inv`
  9482. """
  9483. ...
  9484. def is_complex(input: Tensor) -> _bool:
  9485. r"""
  9486. is_complex(input) -> (bool)
9487. Returns True if the data type of :attr:`input` is a complex data type, i.e.,
9488. one of ``torch.complex64`` and ``torch.complex128``.
  9489. Args:
  9490. input (Tensor): the input tensor.
  9491. """
  9492. ...
  9493. def is_conj(input: Tensor) -> _bool:
  9494. r"""
  9495. is_conj(input) -> (bool)
  9496. Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
  9497. Args:
  9498. input (Tensor): the input tensor.
  9499. """
  9500. ...
  9501. def is_distributed(input: Tensor) -> _bool: ...
  9502. def is_floating_point(input: Tensor) -> _bool:
  9503. r"""
  9504. is_floating_point(input) -> (bool)
9505. Returns True if the data type of :attr:`input` is a floating point data type, i.e.,
  9506. one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
  9507. Args:
  9508. input (Tensor): the input tensor.
  9509. """
  9510. ...
  9511. def is_grad_enabled() -> _bool:
  9512. r"""
  9513. is_grad_enabled() -> (bool)
  9514. Returns True if grad mode is currently enabled.
  9515. """
  9516. ...
  9517. def is_inference(input: Tensor) -> _bool:
  9518. r"""
  9519. is_inference(input) -> (bool)
  9520. Returns True if :attr:`input` is an inference tensor.
  9521. A non-view tensor is an inference tensor if and only if it was
  9522. allocated during inference mode. A view tensor is an inference
  9523. tensor if and only if the tensor it is a view of is an inference tensor.
  9524. For details on inference mode please see
  9525. `Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
  9526. Args:
  9527. input (Tensor): the input tensor.
  9528. """
  9529. ...
  9530. def is_inference_mode_enabled() -> _bool:
  9531. r"""
  9532. is_inference_mode_enabled() -> (bool)
  9533. Returns True if inference mode is currently enabled.
  9534. """
  9535. ...
  9536. def is_neg(input: Tensor) -> _bool: ...
  9537. def is_nonzero(input: Tensor) -> _bool:
  9538. r"""
  9539. is_nonzero(input) -> (bool)
9540. Returns True if :attr:`input` is a single-element tensor which is not equal
9541. to zero after type conversions, i.e. not equal to ``torch.tensor([0.])``,
9542. ``torch.tensor([0])``, or ``torch.tensor([False])``.
9543. Throws a ``RuntimeError`` if ``torch.numel(input) != 1`` (even in the case
9544. of sparse tensors).
  9546. Args:
  9547. input (Tensor): the input tensor.
  9548. Examples::
  9549. >>> torch.is_nonzero(torch.tensor([0.]))
  9550. False
  9551. >>> torch.is_nonzero(torch.tensor([1.5]))
  9552. True
  9553. >>> torch.is_nonzero(torch.tensor([False]))
  9554. False
  9555. >>> torch.is_nonzero(torch.tensor([3]))
  9556. True
  9557. >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
  9558. Traceback (most recent call last):
  9559. ...
  9560. RuntimeError: bool value of Tensor with more than one value is ambiguous
  9561. >>> torch.is_nonzero(torch.tensor([]))
  9562. Traceback (most recent call last):
  9563. ...
  9564. RuntimeError: bool value of Tensor with no values is ambiguous
  9565. """
  9566. ...
  9567. def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
  9568. def is_signed(input: Tensor) -> _bool: ...
  9569. def is_vulkan_available() -> _bool: ...
  9570. def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor:
  9571. r"""
  9572. isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
  9573. Returns a new tensor with boolean elements representing if each element of
  9574. :attr:`input` is "close" to the corresponding element of :attr:`other`.
  9575. Closeness is defined as:
  9576. .. math::
  9577. \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
  9578. where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
  9579. and/or :attr:`other` are nonfinite they are close if and only if
  9580. they are equal, with NaNs being considered equal to each other when
  9581. :attr:`equal_nan` is True.
  9582. Args:
  9583. input (Tensor): first tensor to compare
  9584. other (Tensor): second tensor to compare
9585. rtol (float, optional): relative tolerance. Default: 1e-05
9586. atol (float, optional): absolute tolerance. Default: 1e-08
  9587. equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
  9588. Examples::
  9589. >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
  9590. tensor([ True, False, False])
  9591. >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
  9592. tensor([True, True])
  9593. """
  9594. ...
  9595. def isfinite(input: Tensor) -> Tensor:
  9596. r"""
  9597. isfinite(input) -> Tensor
  9598. Returns a new tensor with boolean elements representing if each element is `finite` or not.
  9599. Real values are finite when they are not NaN, negative infinity, or infinity.
  9600. Complex values are finite when both their real and imaginary parts are finite.
  9601. Args:
  9602. input (Tensor): the input tensor.
  9603. Returns:
  9604. A boolean tensor that is True where :attr:`input` is finite and False elsewhere
  9605. Example::
  9606. >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
  9607. tensor([True, False, True, False, False])
  9608. """
  9609. ...
  9610. @overload
  9611. def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
  9612. r"""
  9613. isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
  9614. Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
  9615. a boolean tensor of the same shape as :attr:`elements` that is True for elements
  9616. in :attr:`test_elements` and False otherwise.
  9617. .. note::
  9618. One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
  9619. Args:
  9620. elements (Tensor or Scalar): Input elements
  9621. test_elements (Tensor or Scalar): Values against which to test for each input element
  9622. assume_unique (bool, optional): If True, assumes both :attr:`elements` and
  9623. :attr:`test_elements` contain unique elements, which can speed up the
  9624. calculation. Default: False
  9625. invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
  9626. values for elements *not* in :attr:`test_elements`. Default: False
  9627. Returns:
  9628. A boolean tensor of the same shape as :attr:`elements` that is True for elements in
  9629. :attr:`test_elements` and False otherwise
9630. Example::
  9631. >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
  9632. tensor([[False, True],
  9633. [ True, False]])
  9634. """
  9635. ...
  9636. @overload
  9637. def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
  9638. r"""
  9639. isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
  9640. Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
  9641. a boolean tensor of the same shape as :attr:`elements` that is True for elements
  9642. in :attr:`test_elements` and False otherwise.
  9643. .. note::
  9644. One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
  9645. Args:
  9646. elements (Tensor or Scalar): Input elements
  9647. test_elements (Tensor or Scalar): Values against which to test for each input element
  9648. assume_unique (bool, optional): If True, assumes both :attr:`elements` and
  9649. :attr:`test_elements` contain unique elements, which can speed up the
  9650. calculation. Default: False
  9651. invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
  9652. values for elements *not* in :attr:`test_elements`. Default: False
  9653. Returns:
  9654. A boolean tensor of the same shape as :attr:`elements` that is True for elements in
  9655. :attr:`test_elements` and False otherwise
9656. Example::
  9657. >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
  9658. tensor([[False, True],
  9659. [ True, False]])
  9660. """
  9661. ...
  9662. @overload
  9663. def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
  9664. r"""
  9665. isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
  9666. Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
  9667. a boolean tensor of the same shape as :attr:`elements` that is True for elements
  9668. in :attr:`test_elements` and False otherwise.
  9669. .. note::
  9670. One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
  9671. Args:
  9672. elements (Tensor or Scalar): Input elements
  9673. test_elements (Tensor or Scalar): Values against which to test for each input element
  9674. assume_unique (bool, optional): If True, assumes both :attr:`elements` and
  9675. :attr:`test_elements` contain unique elements, which can speed up the
  9676. calculation. Default: False
  9677. invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
  9678. values for elements *not* in :attr:`test_elements`. Default: False
  9679. Returns:
  9680. A boolean tensor of the same shape as :attr:`elements` that is True for elements in
  9681. :attr:`test_elements` and False otherwise
9682. Example::
  9683. >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
  9684. tensor([[False, True],
  9685. [ True, False]])
  9686. """
  9687. ...
  9688. def isinf(input: Tensor) -> Tensor:
  9689. r"""
  9690. isinf(input) -> Tensor
  9691. Tests if each element of :attr:`input` is infinite
  9692. (positive or negative infinity) or not.
  9693. .. note::
  9694. Complex values are infinite when their real or imaginary part is
  9695. infinite.
  9696. Args:
  9697. input (Tensor): the input tensor.
  9698. Returns:
  9699. A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
  9700. Example::
  9701. >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
  9702. tensor([False, True, False, True, False])
  9703. """
  9704. ...
  9705. def isnan(input: Tensor) -> Tensor:
  9706. r"""
  9707. isnan(input) -> Tensor
  9708. Returns a new tensor with boolean elements representing if each element of :attr:`input`
9709. is NaN or not. Complex values are considered NaN when their real and/or
9710. imaginary part is NaN.
  9711. Arguments:
  9712. input (Tensor): the input tensor.
  9713. Returns:
  9714. A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
  9715. Example::
  9716. >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
  9717. tensor([False, True, False])
  9718. """
  9719. ...
  9720. def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9721. r"""
  9722. isneginf(input, *, out=None) -> Tensor
  9723. Tests if each element of :attr:`input` is negative infinity or not.
  9724. Args:
  9725. input (Tensor): the input tensor.
  9726. Keyword args:
  9727. out (Tensor, optional): the output tensor.
  9728. Example::
  9729. >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
  9730. >>> torch.isneginf(a)
  9731. tensor([ True, False, False])
  9732. """
  9733. ...
  9734. def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9735. r"""
  9736. isposinf(input, *, out=None) -> Tensor
  9737. Tests if each element of :attr:`input` is positive infinity or not.
  9738. Args:
  9739. input (Tensor): the input tensor.
  9740. Keyword args:
  9741. out (Tensor, optional): the output tensor.
  9742. Example::
  9743. >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
  9744. >>> torch.isposinf(a)
  9745. tensor([False, True, False])
  9746. """
  9747. ...
  9748. def isreal(input: Tensor) -> Tensor:
  9749. r"""
  9750. isreal(input) -> Tensor
  9751. Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
  9752. All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
  9753. Arguments:
  9754. input (Tensor): the input tensor.
  9755. Returns:
  9756. A boolean tensor that is True where :attr:`input` is real and False elsewhere
  9757. Example::
  9758. >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
  9759. tensor([True, False, True])
  9760. """
  9761. ...
  9762. def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ...
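# A round-trip sketch: with matching parameters and a window satisfying the
# overlap-add constraint (e.g. a Hann window at the default hop), istft should
# approximately invert stft.
# >>> x = torch.randn(1024)
# >>> window = torch.hann_window(256)
# >>> spec = torch.stft(x, n_fft=256, window=window, return_complex=True)
# >>> y = torch.istft(spec, n_fft=256, window=window, length=1024)
# >>> torch.allclose(x, y, atol=1e-5)
# True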
  9763. @overload
  9764. def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  9765. r"""
  9766. kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  9767. Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
  9768. Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
  9769. ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
  9770. where ``L`` is the :attr:`window_length`. This function computes:
  9771. .. math::
  9772. out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
  9773. Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
9774. ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
  9775. The :attr:`periodic` argument is intended as a helpful shorthand
  9776. to produce a periodic window as input to functions like :func:`torch.stft`.
  9777. .. note::
  9778. If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
  9779. Args:
  9780. window_length (int): length of the window.
  9781. periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
  9782. If False, returns a symmetric window suitable for use in filter design.
  9783. beta (float, optional): shape parameter for the window.
  9784. Keyword args:
  9785. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9786. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
  9787. layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
  9788. ``torch.strided`` (dense layout) is supported.
  9789. device (:class:`torch.device`, optional): the desired device of returned tensor.
  9790. Default: if ``None``, uses the current device for the default tensor type
  9791. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  9792. for CPU tensor types and the current CUDA device for CUDA tensor types.
  9793. requires_grad (bool, optional): If autograd should record operations on the
  9794. returned tensor. Default: ``False``.
  9795. """
  9796. ...
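# Usage sketch, exercising the properties stated above: a length-one window is
# a single one, and the periodic window equals the symmetric window of length
# L + 1 with its last sample dropped.
# >>> torch.kaiser_window(1)
# tensor([1.])
# >>> torch.allclose(torch.kaiser_window(10, True, 8.0),
# ...                torch.kaiser_window(11, False, 8.0)[:-1])
# True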
  9797. @overload
  9798. def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  9799. r"""
  9800. kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  9801. Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
  9802. Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
  9803. ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
  9804. where ``L`` is the :attr:`window_length`. This function computes:
  9805. .. math::
  9806. out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
  9807. Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
9808. ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
  9809. The :attr:`periodic` argument is intended as a helpful shorthand
  9810. to produce a periodic window as input to functions like :func:`torch.stft`.
  9811. .. note::
  9812. If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
  9813. Args:
  9814. window_length (int): length of the window.
  9815. periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
  9816. If False, returns a symmetric window suitable for use in filter design.
  9817. beta (float, optional): shape parameter for the window.
  9818. Keyword args:
  9819. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9820. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
  9821. layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
  9822. ``torch.strided`` (dense layout) is supported.
  9823. device (:class:`torch.device`, optional): the desired device of returned tensor.
  9824. Default: if ``None``, uses the current device for the default tensor type
  9825. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  9826. for CPU tensor types and the current CUDA device for CUDA tensor types.
  9827. requires_grad (bool, optional): If autograd should record operations on the
  9828. returned tensor. Default: ``False``.
  9829. """
  9830. ...
  9831. @overload
  9832. def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  9833. r"""
  9834. kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  9835. Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
  9836. Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
  9837. ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
  9838. where ``L`` is the :attr:`window_length`. This function computes:
  9839. .. math::
  9840. out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
  9841. Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
9842. ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
  9843. The :attr:`periodic` argument is intended as a helpful shorthand
  9844. to produce a periodic window as input to functions like :func:`torch.stft`.
  9845. .. note::
  9846. If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
  9847. Args:
  9848. window_length (int): length of the window.
  9849. periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
  9850. If False, returns a symmetric window suitable for use in filter design.
  9851. beta (float, optional): shape parameter for the window.
  9852. Keyword args:
  9853. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9854. Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
  9855. layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
  9856. ``torch.strided`` (dense layout) is supported.
  9857. device (:class:`torch.device`, optional): the desired device of returned tensor.
  9858. Default: if ``None``, uses the current device for the default tensor type
  9859. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  9860. for CPU tensor types and the current CUDA device for CUDA tensor types.
  9861. requires_grad (bool, optional): If autograd should record operations on the
  9862. returned tensor. Default: ``False``.
  9863. """
  9864. ...
  9865. def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ...
  9866. def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9867. r"""
  9868. kron(input, other, *, out=None) -> Tensor
  9869. Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
  9870. If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
  9871. :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
  9872. :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
  9873. .. math::
  9874. (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
  9875. \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
  9876. where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
  9877. If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
  9878. Supports real-valued and complex-valued inputs.
  9879. .. note::
  9880. This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
  9881. as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
  9882. :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
  9883. .. math::
  9884. \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
  9885. a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
  9886. \vdots & \ddots & \vdots \\
  9887. a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
  9888. where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
  9889. Arguments:
  9890. input (Tensor)
  9891. other (Tensor)
  9892. Keyword args:
  9893. out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
  9894. Examples::
  9895. >>> mat1 = torch.eye(2)
  9896. >>> mat2 = torch.ones(2, 2)
  9897. >>> torch.kron(mat1, mat2)
  9898. tensor([[1., 1., 0., 0.],
  9899. [1., 1., 0., 0.],
  9900. [0., 0., 1., 1.],
  9901. [0., 0., 1., 1.]])
  9902. >>> mat1 = torch.eye(2)
  9903. >>> mat2 = torch.arange(1, 5).reshape(2, 2)
  9904. >>> torch.kron(mat1, mat2)
  9905. tensor([[1., 2., 0., 0.],
  9906. [3., 4., 0., 0.],
  9907. [0., 0., 1., 2.],
  9908. [0., 0., 3., 4.]])
  9909. """
  9910. ...
  9911. @overload
  9912. def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue:
  9913. r"""
  9914. kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
  9915. Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
  9916. smallest element of each row of the :attr:`input` tensor in the given dimension
9917. :attr:`dim`, and ``indices`` is the index location of each element found.
  9918. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  9919. If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
  9920. are the same size as :attr:`input`, except in the dimension :attr:`dim` where
  9921. they are of size 1. Otherwise, :attr:`dim` is squeezed
  9922. (see :func:`torch.squeeze`), resulting in both the :attr:`values` and
  9923. :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
  9924. .. note::
  9925. When :attr:`input` is a CUDA tensor and there are multiple valid
  9926. :attr:`k` th values, this function may nondeterministically return
  9927. :attr:`indices` for any of them.
  9928. Args:
  9929. input (Tensor): the input tensor.
  9930. k (int): k for the k-th smallest element
  9931. dim (int, optional): the dimension to find the kth value along
  9932. keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
  9933. Keyword args:
  9934. out (tuple, optional): the output tuple of (Tensor, LongTensor)
  9935. can be optionally given to be used as output buffers
  9936. Example::
  9937. >>> x = torch.arange(1., 6.)
  9938. >>> x
  9939. tensor([ 1., 2., 3., 4., 5.])
  9940. >>> torch.kthvalue(x, 4)
  9941. torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
  9942. >>> x=torch.arange(1.,7.).resize_(2,3)
  9943. >>> x
  9944. tensor([[ 1., 2., 3.],
  9945. [ 4., 5., 6.]])
  9946. >>> torch.kthvalue(x, 2, 0, True)
  9947. torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
  9948. """
  9949. ...
  9950. @overload
  9951. def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue:
  9952. r"""
  9953. kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
  9954. Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
  9955. smallest element of each row of the :attr:`input` tensor in the given dimension
9956. :attr:`dim`, and ``indices`` is the index location of each element found.
  9957. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  9958. If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
  9959. are the same size as :attr:`input`, except in the dimension :attr:`dim` where
  9960. they are of size 1. Otherwise, :attr:`dim` is squeezed
  9961. (see :func:`torch.squeeze`), resulting in both the :attr:`values` and
  9962. :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
  9963. .. note::
  9964. When :attr:`input` is a CUDA tensor and there are multiple valid
  9965. :attr:`k` th values, this function may nondeterministically return
  9966. :attr:`indices` for any of them.
  9967. Args:
  9968. input (Tensor): the input tensor.
  9969. k (int): k for the k-th smallest element
  9970. dim (int, optional): the dimension to find the kth value along
  9971. keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
  9972. Keyword args:
  9973. out (tuple, optional): the output tuple of (Tensor, LongTensor)
  9974. can be optionally given to be used as output buffers
  9975. Example::
  9976. >>> x = torch.arange(1., 6.)
  9977. >>> x
  9978. tensor([ 1., 2., 3., 4., 5.])
  9979. >>> torch.kthvalue(x, 4)
  9980. torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
  9981. >>> x=torch.arange(1.,7.).resize_(2,3)
  9982. >>> x
  9983. tensor([[ 1., 2., 3.],
  9984. [ 4., 5., 6.]])
  9985. >>> torch.kthvalue(x, 2, 0, True)
  9986. torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
  9987. """
  9988. ...
  9989. def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ...
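# A minimal sketch of the functional form, normalizing over the last
# dimension; weight and bias are omitted, so the output has per-row mean
# approximately 0 and variance approximately 1.
# >>> x = torch.randn(2, 5)
# >>> y = torch.layer_norm(x, [5])
# >>> bool(y.mean(-1).abs().max() < 1e-6)
# True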
  9990. def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  9991. r"""
  9992. lcm(input, other, *, out=None) -> Tensor
  9993. Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
  9994. Both :attr:`input` and :attr:`other` must have integer types.
  9995. .. note::
  9996. This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
  9997. Args:
  9998. input (Tensor): the input tensor.
  9999. other (Tensor): the second input tensor
  10000. Keyword arguments:
  10001. out (Tensor, optional): the output tensor.
  10002. Example::
  10003. >>> a = torch.tensor([5, 10, 15])
  10004. >>> b = torch.tensor([3, 4, 5])
  10005. >>> torch.lcm(a, b)
  10006. tensor([15, 20, 15])
  10007. >>> c = torch.tensor([3])
  10008. >>> torch.lcm(a, c)
  10009. tensor([15, 30, 15])
  10010. """
  10011. ...
  10012. def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
  10013. def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  10014. r"""
  10015. ldexp(input, other, *, out=None) -> Tensor
  10016. Multiplies :attr:`input` by 2 ** :attr:`other`.
  10017. .. math::
10018. \text{out}_i = \text{input}_i * 2^{\text{other}_i}
  10019. Typically this function is used to construct floating point numbers by multiplying
  10020. mantissas in :attr:`input` with integral powers of two created from the exponents
  10021. in :attr:`other`.
  10022. Args:
  10023. input (Tensor): the input tensor.
  10024. other (Tensor): a tensor of exponents, typically integers.
  10025. Keyword args:
  10026. out (Tensor, optional): the output tensor.
  10027. Example::
  10028. >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
  10029. tensor([2.])
  10030. >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
  10031. tensor([ 2., 4., 8., 16.])
  10032. """
  10033. ...
def ldexp_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    le(input, other, *, out=None) -> Tensor
    Computes :math:`\text{input} \leq \text{other}` element-wise.
    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.
    Args:
        input (Tensor): the tensor to compare
        other (Tensor or Scalar): the tensor or value to compare
    Keyword args:
        out (Tensor, optional): the output tensor.
    Returns:
        A boolean tensor that is True where :attr:`input` is less than or equal to
        :attr:`other` and False elsewhere
    Example::
        >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[True, False], [True, True]])
    """
    ...
@overload
def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    le(input, other, *, out=None) -> Tensor
    Computes :math:`\text{input} \leq \text{other}` element-wise.
    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.
    Args:
        input (Tensor): the tensor to compare
        other (Tensor or Scalar): the tensor or value to compare
    Keyword args:
        out (Tensor, optional): the output tensor.
    Returns:
        A boolean tensor that is True where :attr:`input` is less than or equal to
        :attr:`other` and False elsewhere
    Example::
        >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[True, False], [True, True]])
    """
    ...
@overload
def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    lerp(input, end, weight, *, out=None)
    Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
    on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
    .. math::
        \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
    The shapes of :attr:`start` and :attr:`end` must be
    :ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
    the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
    Args:
        input (Tensor): the tensor with the starting points
        end (Tensor): the tensor with the ending points
        weight (float or tensor): the weight for the interpolation formula
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> start = torch.arange(1., 5.)
        >>> end = torch.empty(4).fill_(10)
        >>> start
        tensor([ 1., 2., 3., 4.])
        >>> end
        tensor([ 10., 10., 10., 10.])
        >>> torch.lerp(start, end, 0.5)
        tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
        >>> torch.lerp(start, end, torch.full_like(start, 0.5))
        tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
    """
    ...
@overload
def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    lerp(input, end, weight, *, out=None)
    Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
    on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
    .. math::
        \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
    The shapes of :attr:`start` and :attr:`end` must be
    :ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
    the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
    Args:
        input (Tensor): the tensor with the starting points
        end (Tensor): the tensor with the ending points
        weight (float or tensor): the weight for the interpolation formula
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> start = torch.arange(1., 5.)
        >>> end = torch.empty(4).fill_(10)
        >>> start
        tensor([ 1., 2., 3., 4.])
        >>> end
        tensor([ 10., 10., 10., 10.])
        >>> torch.lerp(start, end, 0.5)
        tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
        >>> torch.lerp(start, end, torch.full_like(start, 0.5))
        tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
    """
    ...
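# Minimal check of the interpolation formula above (illustrative sketch):
# lerp(start, end, w) matches start + w * (end - start) for a scalar weight.
# >>> import torch
# >>> start, end = torch.arange(1., 5.), torch.full((4,), 10.)
# >>> torch.allclose(torch.lerp(start, end, 0.25), start + 0.25 * (end - start))
# True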
@overload
def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    less(input, other, *, out=None) -> Tensor
    Alias for :func:`torch.lt`.
    """
    ...
@overload
def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    less(input, other, *, out=None) -> Tensor
    Alias for :func:`torch.lt`.
    """
    ...
@overload
def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    less_equal(input, other, *, out=None) -> Tensor
    Alias for :func:`torch.le`.
    """
    ...
@overload
def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    less_equal(input, other, *, out=None) -> Tensor
    Alias for :func:`torch.le`.
    """
    ...
def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    lgamma(input, *, out=None) -> Tensor
    Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
    .. math::
        \text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.arange(0.5, 2, 0.5)
        >>> torch.lgamma(a)
        tensor([ 0.5724, 0.0000, -0.1208])
    """
    ...
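# Cross-check against the standard library (illustrative sketch):
# torch.lgamma agrees elementwise with math.lgamma evaluated per scalar.
# >>> import math, torch
# >>> a = torch.tensor([0.5, 1.5])
# >>> expected = torch.tensor([math.lgamma(0.5), math.lgamma(1.5)])
# >>> torch.allclose(torch.lgamma(a), expected)
# True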
@overload
def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
    .. math::
        (\text{start},
        \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \ldots,
        \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \text{end})
    From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.linspace(3, 10, steps=5)
        tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
        >>> torch.linspace(-10, 10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=1)
        tensor([-10.])
    """
    ...
@overload
def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
    .. math::
        (\text{start},
        \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \ldots,
        \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \text{end})
    From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.linspace(3, 10, steps=5)
        tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
        >>> torch.linspace(-10, 10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=1)
        tensor([-10.])
    """
    ...
@overload
def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
    .. math::
        (\text{start},
        \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \ldots,
        \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \text{end})
    From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.linspace(3, 10, steps=5)
        tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
        >>> torch.linspace(-10, 10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=1)
        tensor([-10.])
    """
    ...
@overload
def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
    .. math::
        (\text{start},
        \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \ldots,
        \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \text{end})
    From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.linspace(3, 10, steps=5)
        tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
        >>> torch.linspace(-10, 10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=1)
        tensor([-10.])
    """
    ...
@overload
def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
    .. math::
        (\text{start},
        \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \ldots,
        \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
        \text{end})
    From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.linspace(3, 10, steps=5)
        tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
        >>> torch.linspace(-10, 10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=5)
        tensor([-10., -5., 0., 5., 10.])
        >>> torch.linspace(start=-10, end=10, steps=1)
        tensor([-10.])
    """
    ...
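# The closed form above can be reproduced with arange (illustrative sketch
# for the real-valued case with steps > 1, under default dtypes):
# >>> import torch
# >>> start, end, steps = 3.0, 10.0, 5
# >>> manual = start + torch.arange(steps) * (end - start) / (steps - 1)
# >>> torch.allclose(torch.linspace(start, end, steps), manual)
# True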
def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    log(input, *, out=None) -> Tensor
    Returns a new tensor with the natural logarithm of the elements
    of :attr:`input`.
    .. math::
        y_{i} = \log_{e} (x_{i})
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.rand(5) * 5
        >>> a
        tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
        >>> torch.log(a)
        tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
    """
    ...
def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    log10(input, *, out=None) -> Tensor
    Returns a new tensor with the logarithm to the base 10 of the elements
    of :attr:`input`.
    .. math::
        y_{i} = \log_{10} (x_{i})
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.rand(5)
        >>> a
        tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
        >>> torch.log10(a)
        tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
    """
    ...
def log10_(input: Tensor) -> Tensor: ...
def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    log1p(input, *, out=None) -> Tensor
    Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
    .. math::
        y_i = \log_{e} (x_i + 1)
    .. note:: This function is more accurate than :func:`torch.log` for small
        values of :attr:`input`
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.randn(5)
        >>> a
        tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
        >>> torch.log1p(a)
        tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
    """
    ...
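# Why log1p matters for small inputs (illustrative sketch): in float32,
# 1 + 1e-10 rounds to exactly 1, so the naive form loses the answer entirely.
# >>> import torch
# >>> x = torch.tensor([1e-10])
# >>> torch.log(1 + x)
# tensor([0.])
# >>> torch.log1p(x)
# tensor([1.0000e-10])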
def log1p_(input: Tensor) -> Tensor: ...
def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    log2(input, *, out=None) -> Tensor
    Returns a new tensor with the logarithm to the base 2 of the elements
    of :attr:`input`.
    .. math::
        y_{i} = \log_{2} (x_{i})
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.rand(5)
        >>> a
        tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
        >>> torch.log2(a)
        tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
    """
    ...
def log2_(input: Tensor) -> Tensor: ...
def log_(input: Tensor) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logaddexp(input, other, *, out=None) -> Tensor
    Logarithm of the sum of exponentiations of the inputs.
    Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
    in statistics, where the calculated probabilities of events may be so small as to
    underflow the range of normal floating point numbers. In such cases the logarithm
    of the calculated probability is stored. This function allows adding
    probabilities stored in such a fashion.
    This op should not be confused with :func:`torch.logsumexp`, which performs a
    reduction over a single tensor.
    Args:
        input (Tensor): the input tensor.
        other (Tensor): the second input tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
    Example::
        >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
        tensor([-0.3069, -0.6867, -0.8731])
        >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
        tensor([-1., -2., -3.])
        >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
        tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
    """
    ...
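# Typical use with log-probabilities (illustrative sketch): adding two
# probabilities stored in log space without ever leaving log space.
# >>> import torch
# >>> log_p, log_q = torch.tensor([0.5]).log(), torch.tensor([0.25]).log()
# >>> torch.logaddexp(log_p, log_q).exp()   # p + q = 0.75
# tensor([0.7500])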
def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logaddexp2(input, other, *, out=None) -> Tensor
    Logarithm of the sum of exponentiations of the inputs in base-2.
    Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
    :func:`torch.logaddexp` for more details.
    Args:
        input (Tensor): the input tensor.
        other (Tensor): the second input tensor
    Keyword arguments:
        out (Tensor, optional): the output tensor.
    """
    ...
@overload
def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logcumsumexp(input, dim, *, out=None) -> Tensor
    Returns the logarithm of the cumulative summation of the exponentiation of
    elements of :attr:`input` in the dimension :attr:`dim`.
    For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
    .. math::
        \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij})
    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.randn(10)
        >>> torch.logcumsumexp(a, dim=0)
        tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
                 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
    """
    ...
@overload
def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logcumsumexp(input, dim, *, out=None) -> Tensor
    Returns the logarithm of the cumulative summation of the exponentiation of
    elements of :attr:`input` in the dimension :attr:`dim`.
    For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
    .. math::
        \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij})
    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to do the operation over
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.randn(10)
        >>> torch.logcumsumexp(a, dim=0)
        tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
                 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
    """
    ...
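# Equivalence with the naive composition (illustrative sketch); the fused op
# exists because exp can overflow for large inputs where this version fails.
# >>> import torch
# >>> a = torch.tensor([0.1, 0.2, 0.3])
# >>> naive = torch.log(torch.cumsum(torch.exp(a), dim=0))
# >>> torch.allclose(torch.logcumsumexp(a, dim=0), naive)
# True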
def logdet(input: Tensor) -> Tensor:
    r"""
    logdet(input) -> Tensor
    Calculates log determinant of a square matrix or batches of square matrices.
    It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
    a negative determinant.
    .. note::
        Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
        is not invertible. In this case, double backward through :meth:`logdet` will
        be unstable when :attr:`input` doesn't have distinct singular values. See
        :func:`torch.linalg.svd` for details.
    .. seealso::
        :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
        absolute value of the determinant of real-valued (resp. complex) square matrices.
    Arguments:
        input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
            batch dimensions.
    Example::
        >>> A = torch.randn(3, 3)
        >>> torch.det(A)
        tensor(0.2611)
        >>> torch.logdet(A)
        tensor(-1.3430)
        >>> A = torch.randn(3, 2, 2)
        >>> A
        tensor([[[ 0.9254, -0.6213],
                 [-0.5787, 1.6843]],
                [[ 0.3242, -0.9665],
                 [ 0.4539, -0.0887]],
                [[ 1.1336, -0.4025],
                 [-0.7089, 0.9032]]])
        >>> A.det()
        tensor([1.1990, 0.4099, 0.7386])
        >>> A.det().log()
        tensor([ 0.1815, -0.8917, -0.3031])
    """
    ...
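# Relationship to slogdet noted above (illustrative sketch): for a matrix
# with positive determinant, logdet equals the logabsdet part of slogdet.
# >>> import torch
# >>> A = torch.tensor([[2., 0.], [0., 3.]])   # det = 6
# >>> torch.logdet(A)
# tensor(1.7918)
# >>> sign, logabsdet = torch.linalg.slogdet(A)
# >>> sign, logabsdet
# (tensor(1.), tensor(1.7918))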
def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logical_and(input, other, *, out=None) -> Tensor
    Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
    treated as ``True``.
    Args:
        input (Tensor): the input tensor.
        other (Tensor): the tensor to compute AND with
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
        tensor([ True, False, False])
        >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
        >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
        >>> torch.logical_and(a, b)
        tensor([False, False, True, False])
        >>> torch.logical_and(a.double(), b.double())
        tensor([False, False, True, False])
        >>> torch.logical_and(a.double(), b)
        tensor([False, False, True, False])
        >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
        tensor([False, False, True, False])
    """
    ...
def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logical_not(input, *, out=None) -> Tensor
    Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool
    dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
    Args:
        input (Tensor): the input tensor.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> torch.logical_not(torch.tensor([True, False]))
        tensor([False, True])
        >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
        tensor([ True, False, False])
        >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
        tensor([ True, False, False])
        >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
        tensor([1, 0, 0], dtype=torch.int16)
    """
    ...
def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logical_or(input, other, *, out=None) -> Tensor
    Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
    treated as ``True``.
    Args:
        input (Tensor): the input tensor.
        other (Tensor): the tensor to compute OR with
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
        tensor([ True, False, True])
        >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
        >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
        >>> torch.logical_or(a, b)
        tensor([ True, True, True, False])
        >>> torch.logical_or(a.double(), b.double())
        tensor([ True, True, True, False])
        >>> torch.logical_or(a.double(), b)
        tensor([ True, True, True, False])
        >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
        tensor([ True, True, True, False])
    """
    ...
def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logical_xor(input, other, *, out=None) -> Tensor
    Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
    treated as ``True``.
    Args:
        input (Tensor): the input tensor.
        other (Tensor): the tensor to compute XOR with
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
        tensor([False, False, True])
        >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
        >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
        >>> torch.logical_xor(a, b)
        tensor([ True, True, False, False])
        >>> torch.logical_xor(a.double(), b.double())
        tensor([ True, True, False, False])
        >>> torch.logical_xor(a.double(), b)
        tensor([ True, True, False, False])
        >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
        tensor([ True, True, False, False])
    """
    ...
def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logit(input, eps=None, *, out=None) -> Tensor
    Alias for :func:`torch.special.logit`.
    """
    ...
def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ...
@overload
def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :math:`\text{base}^{\text{start}}` to
    :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
    with base :attr:`base`. That is, the values are:
    .. math::
        (\text{base}^{\text{start}},
        \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \ldots,
        \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \text{base}^{\text{end}})
    From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
        base (float, optional): base of the logarithm function. Default: ``10.0``.
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.logspace(start=-10, end=10, steps=5)
        tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
        >>> torch.logspace(start=0.1, end=1.0, steps=5)
        tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
        >>> torch.logspace(start=0.1, end=1.0, steps=1)
        tensor([1.2589])
        >>> torch.logspace(start=2, end=2, steps=1, base=2)
        tensor([4.])
    """
    ...
@overload
def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :math:`\text{base}^{\text{start}}` to
    :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
    with base :attr:`base`. That is, the values are:
    .. math::
        (\text{base}^{\text{start}},
        \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \ldots,
        \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \text{base}^{\text{end}})
    From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
        base (float, optional): base of the logarithm function. Default: ``10.0``.
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.logspace(start=-10, end=10, steps=5)
        tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
        >>> torch.logspace(start=0.1, end=1.0, steps=5)
        tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
        >>> torch.logspace(start=0.1, end=1.0, steps=1)
        tensor([1.2589])
        >>> torch.logspace(start=2, end=2, steps=1, base=2)
        tensor([4.])
    """
    ...
@overload
def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :math:`\text{base}^{\text{start}}` to
    :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
    with base :attr:`base`. That is, the values are:
    .. math::
        (\text{base}^{\text{start}},
        \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \ldots,
        \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \text{base}^{\text{end}})
    From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
        base (float, optional): base of the logarithm function. Default: ``10.0``.
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.logspace(start=-10, end=10, steps=5)
        tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
        >>> torch.logspace(start=0.1, end=1.0, steps=5)
        tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
        >>> torch.logspace(start=0.1, end=1.0, steps=1)
        tensor([1.2589])
        >>> torch.logspace(start=2, end=2, steps=1, base=2)
        tensor([4.])
    """
    ...
@overload
def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :math:`\text{base}^{\text{start}}` to
    :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
    with base :attr:`base`. That is, the values are:
    .. math::
        (\text{base}^{\text{start}},
        \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \ldots,
        \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \text{base}^{\text{end}})
    From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
        base (float, optional): base of the logarithm function. Default: ``10.0``.
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.logspace(start=-10, end=10, steps=5)
        tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
        >>> torch.logspace(start=0.1, end=1.0, steps=5)
        tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
        >>> torch.logspace(start=0.1, end=1.0, steps=1)
        tensor([1.2589])
        >>> torch.logspace(start=2, end=2, steps=1, base=2)
        tensor([4.])
    """
    ...
@overload
def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
    Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
    spaced from :math:`\text{base}^{\text{start}}` to
    :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
    with base :attr:`base`. That is, the values are:
    .. math::
        (\text{base}^{\text{start}},
        \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \ldots,
        \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
        \text{base}^{\text{end}})
    From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
    Args:
        start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
        end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
        steps (int): size of the constructed tensor
        base (float, optional): base of the logarithm function. Default: ``10.0``.
    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (torch.dtype, optional): the data type to perform the computation in.
            Default: if None, uses the global default dtype (see torch.get_default_dtype())
            when both :attr:`start` and :attr:`end` are real,
            and corresponding complex dtype when either is complex.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
    Example::
        >>> torch.logspace(start=-10, end=10, steps=5)
        tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
        >>> torch.logspace(start=0.1, end=1.0, steps=5)
        tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
        >>> torch.logspace(start=0.1, end=1.0, steps=1)
        tensor([1.2589])
        >>> torch.logspace(start=2, end=2, steps=1, base=2)
        tensor([4.])
    """
    ...
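# logspace is linspace composed with exponentiation (illustrative sketch):
# >>> import torch
# >>> torch.allclose(torch.logspace(0.1, 1.0, 5),
# ...                10 ** torch.linspace(0.1, 1.0, 5))
# True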
@overload
def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logsumexp(input, dim, keepdim=False, *, out=None)
    Returns the log of summed exponentials of each row of the :attr:`input`
    tensor in the given dimension :attr:`dim`. The computation is numerically
    stabilized.
    For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
    .. math::
        \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})
    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).
    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.randn(3, 3)
        >>> torch.logsumexp(a, 1)
        tensor([1.4907, 1.0593, 1.5696])
        >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
        tensor(1.6859e-07)
    """
    ...
@overload
def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    logsumexp(input, dim, keepdim=False, *, out=None)
    Returns the log of summed exponentials of each row of the :attr:`input`
    tensor in the given dimension :attr:`dim`. The computation is numerically
    stabilized.
    For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
    .. math::
        \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})
    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).
    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> a = torch.randn(3, 3)
        >>> torch.logsumexp(a, 1)
        tensor([1.4907, 1.0593, 1.5696])
        >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
        tensor(1.6859e-07)
    """
    ...
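# The classic use of logsumexp is normalizing log-probabilities without
# overflow (illustrative sketch): log_softmax(x) == x - logsumexp(x, keepdim).
# >>> import torch
# >>> x = torch.tensor([[1., 2., 3.]])
# >>> torch.allclose(torch.log_softmax(x, dim=1),
# ...                x - torch.logsumexp(x, dim=1, keepdim=True))
# True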
@overload
def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
@overload
def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    lt(input, other, *, out=None) -> Tensor
    Computes :math:`\text{input} < \text{other}` element-wise.
    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.
    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare
    Keyword args:
        out (Tensor, optional): the output tensor.
    Returns:
        A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
    Example::
        >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[False, False], [True, False]])
    """
    ...
@overload
def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    lt(input, other, *, out=None) -> Tensor
    Computes :math:`\text{input} < \text{other}` element-wise.
    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.
    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare
    Keyword args:
        out (Tensor, optional): the output tensor.
    Returns:
        A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
    Example::
        >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[False, False], [True, False]])
    """
    ...
def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
    Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
    LU factorization of A from :func:`~linalg.lu_factor`.
    This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
    .. warning::
        :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
        :func:`torch.lu_solve` will be removed in a future PyTorch release.
        ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
        .. code:: python
            X = linalg.lu_solve(LU, pivots, B)
    Arguments:
        b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
            is zero or more batch dimensions.
        LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
            where :math:`*` is zero or more batch dimensions.
        LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
            where :math:`*` is zero or more batch dimensions.
            The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
            :attr:`LU_data`.
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> A = torch.randn(2, 3, 3)
        >>> b = torch.randn(2, 3, 1)
        >>> LU, pivots = torch.linalg.lu_factor(A)
        >>> x = torch.lu_solve(b, LU, pivots)
        >>> torch.dist(A @ x, b)
        tensor(1.00000e-07 *
               2.8312)
    """
    ...
def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack:
    r"""
    lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
    Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
    .. seealso::
        :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
        than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
    Args:
        LU_data (Tensor): the packed LU factorization data
        LU_pivots (Tensor): the packed LU factorization pivots
        unpack_data (bool): flag indicating if the data should be unpacked.
            If ``False``, then the returned ``L`` and ``U`` are empty tensors.
            Default: ``True``
        unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
            If ``False``, then the returned ``P`` is an empty tensor.
            Default: ``True``
    Keyword args:
        out (tuple, optional): output tuple of three tensors. Ignored if `None`.
    Returns:
        A namedtuple ``(P, L, U)``
    Examples::
        >>> A = torch.randn(2, 3, 3)
        >>> LU, pivots = torch.linalg.lu_factor(A)
        >>> P, L, U = torch.lu_unpack(LU, pivots)
        >>> # We can recover A from the factorization
        >>> A_ = P @ L @ U
        >>> torch.allclose(A, A_)
        True
        >>> # LU factorization of a rectangular matrix:
        >>> A = torch.randn(2, 3, 2)
        >>> LU, pivots = torch.linalg.lu_factor(A)
        >>> P, L, U = torch.lu_unpack(LU, pivots)
        >>> # P, L, U are the same as returned by linalg.lu
        >>> P_, L_, U_ = torch.linalg.lu(A)
        >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
        True
    """
    ...
def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
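# This stub carries no docstring; the full documentation lives on
# torch.nn.functional.margin_ranking_loss. For orientation only, the
# per-element loss is max(0, -target * (input1 - input2) + margin), and
# ``reduction`` here is the internal integer encoding of the reduction mode.
# A hedged sketch of the elementwise formula (names are illustrative):
#
#     >>> x1, x2 = torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])
#     >>> y = torch.ones(2)                       # y = 1 asks for x1 to rank above x2
#     >>> torch.clamp(-y * (x1 - x2), min=0.0)    # per-element loss with margin = 0
#     tensor([1., 0.])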
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
@overload
def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ...
def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
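# These masked ops are undocumented in this stub; their docstrings live on
# torch.Tensor. As a rough sketch of the semantics (``mask`` is a BoolTensor):
# masked_fill writes one value at every True position, while masked_scatter
# consumes successive elements of ``source`` at the True positions:
#
#     >>> t = torch.zeros(2, 2)
#     >>> m = torch.tensor([[True, False], [False, True]])
#     >>> torch.masked_fill(t, m, 7.0)
#     tensor([[7., 0.],
#             [0., 7.]])
#     >>> torch.masked_scatter(t, m, torch.tensor([1.0, 2.0]))
#     tensor([[1., 0.],
#             [0., 2.]])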
def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    masked_select(input, mask, *, out=None) -> Tensor

    Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
    the boolean mask :attr:`mask` which is a `BoolTensor`.

    The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
    to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.

    .. note:: The returned tensor does **not** use the same storage
        as the original tensor

    Args:
        input (Tensor): the input tensor.
        mask (BoolTensor): the tensor containing the binary mask to index with

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.randn(3, 4)
        >>> x
        tensor([[ 0.3552, -2.3825, -0.8297,  0.3477],
                [-1.2035,  1.2252,  0.5002,  0.6248],
                [ 0.1307, -2.0608,  0.1244,  2.0139]])
        >>> mask = x.ge(0.5)
        >>> mask
        tensor([[False, False, False, False],
                [False, True, True, True],
                [False, False, False, True]])
        >>> torch.masked_select(x, mask)
        tensor([ 1.2252,  0.5002,  0.6248,  2.0139])
    """
    ...
def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    matmul(input, other, *, out=None) -> Tensor

    Matrix product of two tensors.

    The behavior depends on the dimensionality of the tensors as follows:

    - If both tensors are 1-dimensional, the dot product (scalar) is returned.
    - If both arguments are 2-dimensional, the matrix-matrix product is returned.
    - If the first argument is 1-dimensional and the second argument is 2-dimensional,
      a 1 is prepended to its dimension for the purpose of the matrix multiply.
      After the matrix multiply, the prepended dimension is removed.
    - If the first argument is 2-dimensional and the second argument is 1-dimensional,
      the matrix-vector product is returned.
    - If both arguments are at least 1-dimensional and at least one argument is
      N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
      argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
      batched matrix multiply and removed after. If the second argument is 1-dimensional, a
      1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
      The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
      must be broadcastable). For example, if :attr:`input` is a
      :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
      tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.

      Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
      are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
      :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
      tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
      matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.

    This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular the
    matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions
    as :func:`torch.mm`.

    .. warning::
        Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
        or may not have autograd support. If you notice missing functionality please
        open a feature request.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    .. note::

        The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.

    Arguments:
        input (Tensor): the first tensor to be multiplied
        other (Tensor): the second tensor to be multiplied

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> # vector x vector
        >>> tensor1 = torch.randn(3)
        >>> tensor2 = torch.randn(3)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([])
        >>> # matrix x vector
        >>> tensor1 = torch.randn(3, 4)
        >>> tensor2 = torch.randn(4)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([3])
        >>> # batched matrix x broadcasted vector
        >>> tensor1 = torch.randn(10, 3, 4)
        >>> tensor2 = torch.randn(4)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([10, 3])
        >>> # batched matrix x batched matrix
        >>> tensor1 = torch.randn(10, 3, 4)
        >>> tensor2 = torch.randn(10, 4, 5)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([10, 3, 5])
        >>> # batched matrix x broadcasted matrix
        >>> tensor1 = torch.randn(10, 3, 4)
        >>> tensor2 = torch.randn(4, 5)
        >>> torch.matmul(tensor1, tensor2).size()
        torch.Size([10, 3, 5])
    """
    ...
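# The batch-broadcasting rule described above (batch dims broadcast; matrix
# dims only need to be multiplication-compatible) in doctest form, using the
# (j x 1 x n x m) @ (k x m x p) shapes from the docstring:
#
#     >>> a = torch.randn(2, 1, 3, 4)   # j=2, n=3, m=4
#     >>> b = torch.randn(5, 4, 6)      # k=5, p=6
#     >>> torch.matmul(a, b).size()
#     torch.Size([2, 5, 3, 6])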
def matrix_exp(input: Tensor) -> Tensor:
    r"""
    matrix_exp(A) -> Tensor

    Alias for :func:`torch.linalg.matrix_exp`.
    """
    ...
def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    matrix_power(input, n, *, out=None) -> Tensor

    Alias for :func:`torch.linalg.matrix_power`
    """
    ...
@overload
def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    max(input) -> Tensor

    Returns the maximum value of all elements in the ``input`` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``max(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6763,  0.7445, -2.2369]])
        >>> torch.max(a)
        tensor(0.7445)

    .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each maximum value found
    (argmax).

    If ``keepdim`` is ``True``, the output tensors are of the same size
    as ``input`` except in the dimension ``dim`` where they are of size 1.
    Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
    in the output tensors having 1 fewer dimension than ``input``.

    .. note:: If there are multiple maximal values in a reduced row then
        the indices of the first maximal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (max, max_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-1.2360, -0.2942, -0.1222,  0.8475],
                [ 1.1949, -1.1127, -2.2379, -0.6702],
                [ 1.5717, -0.9207,  0.1297, -1.8768],
                [-0.6172,  1.0036, -0.6060, -0.2432]])
        >>> torch.max(a, 1)
        torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))

    .. function:: max(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.maximum`.
    """
    ...
@overload
def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    max(input) -> Tensor

    Returns the maximum value of all elements in the ``input`` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``max(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6763,  0.7445, -2.2369]])
        >>> torch.max(a)
        tensor(0.7445)

    .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each maximum value found
    (argmax).

    If ``keepdim`` is ``True``, the output tensors are of the same size
    as ``input`` except in the dimension ``dim`` where they are of size 1.
    Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
    in the output tensors having 1 fewer dimension than ``input``.

    .. note:: If there are multiple maximal values in a reduced row then
        the indices of the first maximal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (max, max_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-1.2360, -0.2942, -0.1222,  0.8475],
                [ 1.1949, -1.1127, -2.2379, -0.6702],
                [ 1.5717, -0.9207,  0.1297, -1.8768],
                [-0.6172,  1.0036, -0.6060, -0.2432]])
        >>> torch.max(a, 1)
        torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))

    .. function:: max(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.maximum`.
    """
    ...
@overload
def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max:
    r"""
    max(input) -> Tensor

    Returns the maximum value of all elements in the ``input`` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``max(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6763,  0.7445, -2.2369]])
        >>> torch.max(a)
        tensor(0.7445)

    .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each maximum value found
    (argmax).

    If ``keepdim`` is ``True``, the output tensors are of the same size
    as ``input`` except in the dimension ``dim`` where they are of size 1.
    Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
    in the output tensors having 1 fewer dimension than ``input``.

    .. note:: If there are multiple maximal values in a reduced row then
        the indices of the first maximal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (max, max_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-1.2360, -0.2942, -0.1222,  0.8475],
                [ 1.1949, -1.1127, -2.2379, -0.6702],
                [ 1.5717, -0.9207,  0.1297, -1.8768],
                [-0.6172,  1.0036, -0.6060, -0.2432]])
        >>> torch.max(a, 1)
        torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))

    .. function:: max(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.maximum`.
    """
    ...
@overload
def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max:
    r"""
    max(input) -> Tensor

    Returns the maximum value of all elements in the ``input`` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``max(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6763,  0.7445, -2.2369]])
        >>> torch.max(a)
        tensor(0.7445)

    .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each maximum value found
    (argmax).

    If ``keepdim`` is ``True``, the output tensors are of the same size
    as ``input`` except in the dimension ``dim`` where they are of size 1.
    Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
    in the output tensors having 1 fewer dimension than ``input``.

    .. note:: If there are multiple maximal values in a reduced row then
        the indices of the first maximal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (max, max_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-1.2360, -0.2942, -0.1222,  0.8475],
                [ 1.1949, -1.1127, -2.2379, -0.6702],
                [ 1.5717, -0.9207,  0.1297, -1.8768],
                [-0.6172,  1.0036, -0.6060, -0.2432]])
        >>> torch.max(a, 1)
        torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))

    .. function:: max(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.maximum`.
    """
    ...
def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ...
def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
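# These pooling stubs are undocumented here (see torch.nn.functional for the
# full docs). As a rough orientation, each pooled dimension shrinks to
# L_out = floor((L_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1),
# with ``stride`` defaulting to ``kernel_size``:
#
#     >>> x = torch.arange(8.0).reshape(1, 1, 8)   # (batch, channels, length)
#     >>> torch.max_pool1d(x, kernel_size=2)       # stride defaults to 2, so L_out = 4
#     tensor([[[1., 3., 5., 7.]]])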
def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    maximum(input, other, *, out=None) -> Tensor

    Computes the element-wise maximum of :attr:`input` and :attr:`other`.

    .. note::

        If one of the elements being compared is a NaN, then that element is returned.
        :func:`maximum` is not supported for tensors with complex dtypes.

    Args:
        input (Tensor): the input tensor.
        other (Tensor): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor((1, 2, -1))
        >>> b = torch.tensor((3, 0, 4))
        >>> torch.maximum(a, b)
        tensor([3, 2, 4])
    """
    ...
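# The NaN note above means NaN propagates rather than being ignored, which is
# what distinguishes maximum/minimum from the NaN-ignoring fmax/fmin. A small
# doctest sketch:
#
#     >>> torch.maximum(torch.tensor([1.0, float('nan')]), torch.tensor([2.0, 0.0]))
#     tensor([2., nan])
#     >>> torch.fmax(torch.tensor([1.0, float('nan')]), torch.tensor([2.0, 0.0]))
#     tensor([2., 0.])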
@overload
def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
    r"""
    mean(input, *, dtype=None) -> Tensor

    Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.

    Args:
        input (Tensor):
            the input tensor, either of floating point or complex dtype

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.2294, -0.5481,  1.3288]])
        >>> torch.mean(a)
        tensor(0.3367)

    .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
       :noindex:

    Returns the mean value of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    .. seealso::

        :func:`torch.nanmean` computes the mean value of `non-NaN` elements.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.3841,  0.6320,  0.4254, -0.7384],
                [-0.9644,  1.0131, -0.6549, -1.4279],
                [-0.2951, -1.3350, -0.7694,  0.5600],
                [ 1.0842, -0.9580,  0.3623,  0.2343]])
        >>> torch.mean(a, 1)
        tensor([-0.0163, -0.5085, -0.4599,  0.1807])
        >>> torch.mean(a, 1, True)
        tensor([[-0.0163],
                [-0.5085],
                [-0.4599],
                [ 0.1807]])
    """
    ...
@overload
def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    mean(input, *, dtype=None) -> Tensor

    Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.

    Args:
        input (Tensor):
            the input tensor, either of floating point or complex dtype

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.2294, -0.5481,  1.3288]])
        >>> torch.mean(a)
        tensor(0.3367)

    .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
       :noindex:

    Returns the mean value of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    .. seealso::

        :func:`torch.nanmean` computes the mean value of `non-NaN` elements.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.3841,  0.6320,  0.4254, -0.7384],
                [-0.9644,  1.0131, -0.6549, -1.4279],
                [-0.2951, -1.3350, -0.7694,  0.5600],
                [ 1.0842, -0.9580,  0.3623,  0.2343]])
        >>> torch.mean(a, 1)
        tensor([-0.0163, -0.5085, -0.4599,  0.1807])
        >>> torch.mean(a, 1, True)
        tensor([[-0.0163],
                [-0.5085],
                [-0.4599],
                [ 0.1807]])
    """
    ...
@overload
def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    mean(input, *, dtype=None) -> Tensor

    Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.

    Args:
        input (Tensor):
            the input tensor, either of floating point or complex dtype

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.2294, -0.5481,  1.3288]])
        >>> torch.mean(a)
        tensor(0.3367)

    .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
       :noindex:

    Returns the mean value of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    .. seealso::

        :func:`torch.nanmean` computes the mean value of `non-NaN` elements.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.3841,  0.6320,  0.4254, -0.7384],
                [-0.9644,  1.0131, -0.6549, -1.4279],
                [-0.2951, -1.3350, -0.7694,  0.5600],
                [ 1.0842, -0.9580,  0.3623,  0.2343]])
        >>> torch.mean(a, 1)
        tensor([-0.0163, -0.5085, -0.4599,  0.1807])
        >>> torch.mean(a, 1, True)
        tensor([[-0.0163],
                [-0.5085],
                [-0.4599],
                [ 0.1807]])
    """
    ...
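# On the ``dtype`` keyword above: casting to a wider dtype before reducing is
# how the documented overflow prevention works. A dtype-only sketch (values
# omitted, since printed precision varies by build):
#
#     >>> x = torch.ones(4, dtype=torch.float16)
#     >>> x.mean().dtype                       # reduced in the input dtype
#     torch.float16
#     >>> x.mean(dtype=torch.float32).dtype    # cast before the reduction
#     torch.float32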
@overload
def median(input: Tensor) -> Tensor:
    r"""
    median(input) -> Tensor

    Returns the median of the values in :attr:`input`.

    .. note::

        The median is not unique for :attr:`input` tensors with an even number
        of elements. In this case the lower of the two medians is returned. To
        compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``median(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 1.5219, -1.5212,  0.2202]])
        >>> torch.median(a)
        tensor(0.2202)

    .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
    in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.

    By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

    If :attr:`keepdim` is ``True``, the output tensors are of the same size
    as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note::

        The median is not unique for :attr:`input` tensors with an even number
        of elements in the dimension :attr:`dim`. In this case the lower of the
        two medians is returned. To compute the mean of both medians in
        :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.

    .. warning::

        ``indices`` does not necessarily contain the first occurrence of each
        median value found, unless it is unique.
        The exact implementation details are device-specific.
        Do not expect the same result when run on CPU and GPU in general.
        For the same reason do not expect the gradients to be deterministic.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
            tensor, which must have dtype long, with their indices in the dimension
            :attr:`dim` of :attr:`input`.

    Example::

        >>> a = torch.randn(4, 5)
        >>> a
        tensor([[ 0.2505, -0.3982, -0.9948,  0.3518, -1.3131],
                [ 0.3180, -0.6993,  1.0436,  0.0438,  0.2270],
                [-0.2751,  0.7303,  0.2192,  0.3321,  0.2488],
                [ 1.0778, -1.9510,  0.7048,  0.4742, -0.7125]])
        >>> torch.median(a, 1)
        torch.return_types.median(values=tensor([-0.3982,  0.2270,  0.2488,  0.4742]), indices=tensor([1, 4, 4, 3]))
    """
    ...
@overload
def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median:
    r"""
    median(input) -> Tensor

    Returns the median of the values in :attr:`input`.

    .. note::

        The median is not unique for :attr:`input` tensors with an even number
        of elements. In this case the lower of the two medians is returned. To
        compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``median(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 1.5219, -1.5212,  0.2202]])
        >>> torch.median(a)
        tensor(0.2202)

    .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
    in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.

    By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

    If :attr:`keepdim` is ``True``, the output tensors are of the same size
    as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note::

        The median is not unique for :attr:`input` tensors with an even number
        of elements in the dimension :attr:`dim`. In this case the lower of the
        two medians is returned. To compute the mean of both medians in
        :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.

    .. warning::

        ``indices`` does not necessarily contain the first occurrence of each
        median value found, unless it is unique.
        The exact implementation details are device-specific.
        Do not expect the same result when run on CPU and GPU in general.
        For the same reason do not expect the gradients to be deterministic.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
            tensor, which must have dtype long, with their indices in the dimension
            :attr:`dim` of :attr:`input`.

    Example::

        >>> a = torch.randn(4, 5)
        >>> a
        tensor([[ 0.2505, -0.3982, -0.9948,  0.3518, -1.3131],
                [ 0.3180, -0.6993,  1.0436,  0.0438,  0.2270],
                [-0.2751,  0.7303,  0.2192,  0.3321,  0.2488],
                [ 1.0778, -1.9510,  0.7048,  0.4742, -0.7125]])
        >>> torch.median(a, 1)
        torch.return_types.median(values=tensor([-0.3982,  0.2270,  0.2488,  0.4742]), indices=tensor([1, 4, 4, 3]))
    """
    ...
@overload
def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median:
    r"""
    median(input) -> Tensor

    Returns the median of the values in :attr:`input`.

    .. note::

        The median is not unique for :attr:`input` tensors with an even number
        of elements. In this case the lower of the two medians is returned. To
        compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``median(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 1.5219, -1.5212,  0.2202]])
        >>> torch.median(a)
        tensor(0.2202)

    .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
    in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.

    By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

    If :attr:`keepdim` is ``True``, the output tensors are of the same size
    as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note::

        The median is not unique for :attr:`input` tensors with an even number
        of elements in the dimension :attr:`dim`. In this case the lower of the
        two medians is returned. To compute the mean of both medians in
        :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.

    .. warning::

        ``indices`` does not necessarily contain the first occurrence of each
        median value found, unless it is unique.
        The exact implementation details are device-specific.
        Do not expect the same result when run on CPU and GPU in general.
        For the same reason do not expect the gradients to be deterministic.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
            tensor, which must have dtype long, with their indices in the dimension
            :attr:`dim` of :attr:`input`.

    Example::

        >>> a = torch.randn(4, 5)
        >>> a
        tensor([[ 0.2505, -0.3982, -0.9948,  0.3518, -1.3131],
                [ 0.3180, -0.6993,  1.0436,  0.0438,  0.2270],
                [-0.2751,  0.7303,  0.2192,  0.3321,  0.2488],
                [ 1.0778, -1.9510,  0.7048,  0.4742, -0.7125]])
        >>> torch.median(a, 1)
        torch.return_types.median(values=tensor([-0.3982,  0.2270,  0.2488,  0.4742]), indices=tensor([1, 4, 4, 3]))
    """
    ...
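# The "lower of the two medians" note above in doctest form: with an even
# element count there is no single middle value; median() picks the lower one,
# while quantile(q=0.5) interpolates between the two:
#
#     >>> t = torch.tensor([1.0, 2.0, 3.0, 4.0])
#     >>> torch.median(t)
#     tensor(2.)
#     >>> torch.quantile(t, 0.5)
#     tensor(2.5000)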
@overload
def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    min(input) -> Tensor

    Returns the minimum value of all elements in the :attr:`input` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``min(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6750,  1.0857,  1.7197]])
        >>> torch.min(a)
        tensor(0.6750)

    .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each minimum value found
    (argmin).

    If :attr:`keepdim` is ``True``, the output tensors are of the same size as
    :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note:: If there are multiple minimal values in a reduced row then
        the indices of the first minimal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (tuple, optional): the tuple of two output tensors (min, min_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
                [-1.4644, -0.2635, -0.3651,  0.6134],
                [ 0.2457,  0.0384,  1.0128,  0.7015],
                [-0.1153,  2.9849,  2.1458,  0.5788]])
        >>> torch.min(a, 1)
        torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

    .. function:: min(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.minimum`.
    """
    ...
@overload
def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    min(input) -> Tensor

    Returns the minimum value of all elements in the :attr:`input` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``min(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6750,  1.0857,  1.7197]])
        >>> torch.min(a)
        tensor(0.6750)

    .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each minimum value found
    (argmin).

    If :attr:`keepdim` is ``True``, the output tensors are of the same size as
    :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note:: If there are multiple minimal values in a reduced row then
        the indices of the first minimal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (tuple, optional): the tuple of two output tensors (min, min_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
                [-1.4644, -0.2635, -0.3651,  0.6134],
                [ 0.2457,  0.0384,  1.0128,  0.7015],
                [-0.1153,  2.9849,  2.1458,  0.5788]])
        >>> torch.min(a, 1)
        torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

    .. function:: min(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.minimum`.
    """
    ...
@overload
def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min:
    r"""
    min(input) -> Tensor

    Returns the minimum value of all elements in the :attr:`input` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``min(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6750,  1.0857,  1.7197]])
        >>> torch.min(a)
        tensor(0.6750)

    .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each minimum value found
    (argmin).

    If :attr:`keepdim` is ``True``, the output tensors are of the same size as
    :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note:: If there are multiple minimal values in a reduced row then
        the indices of the first minimal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (tuple, optional): the tuple of two output tensors (min, min_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
                [-1.4644, -0.2635, -0.3651,  0.6134],
                [ 0.2457,  0.0384,  1.0128,  0.7015],
                [-0.1153,  2.9849,  2.1458,  0.5788]])
        >>> torch.min(a, 1)
        torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

    .. function:: min(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.minimum`.
    """
    ...
@overload
def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min:
    r"""
    min(input) -> Tensor

    Returns the minimum value of all elements in the :attr:`input` tensor.

    .. warning::

        This function produces deterministic (sub)gradients unlike ``min(dim=0)``

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.6750,  1.0857,  1.7197]])
        >>> torch.min(a)
        tensor(0.6750)

    .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`. And ``indices`` is the index location of each minimum value found
    (argmin).

    If :attr:`keepdim` is ``True``, the output tensors are of the same size as
    :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensors having 1 fewer dimension than :attr:`input`.

    .. note:: If there are multiple minimal values in a reduced row then
        the indices of the first minimal value are returned.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (tuple, optional): the tuple of two output tensors (min, min_indices)

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
                [-1.4644, -0.2635, -0.3651,  0.6134],
                [ 0.2457,  0.0384,  1.0128,  0.7015],
                [-0.1153,  2.9849,  2.1458,  0.5788]])
        >>> torch.min(a, 1)
        torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

    .. function:: min(input, other, *, out=None) -> Tensor
       :noindex:

    See :func:`torch.minimum`.
    """
    ...
def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    minimum(input, other, *, out=None) -> Tensor

    Computes the element-wise minimum of :attr:`input` and :attr:`other`.

    .. note::

        If one of the elements being compared is a NaN, then that element is returned.
        :func:`minimum` is not supported for tensors with complex dtypes.

    Args:
        input (Tensor): the input tensor.
        other (Tensor): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor((1, 2, -1))
        >>> b = torch.tensor((3, 0, 4))
        >>> torch.minimum(a, b)
        tensor([1, 0, -1])
    """
    ...
def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ...
def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    mm(input, mat2, *, out=None) -> Tensor

    Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.

    If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
    :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.

    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
        For broadcasting matrix products, see :func:`torch.matmul`.

    Supports strided and sparse 2-D tensors as inputs, autograd with
    respect to strided inputs.

    This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
    If :attr:`out` is provided its layout will be used. Otherwise, the result
    layout will be deduced from that of :attr:`input`.

    .. warning::
        Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
        or may not have autograd support. If you notice missing functionality please
        open a feature request.

    This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        input (Tensor): the first matrix to be matrix multiplied
        mat2 (Tensor): the second matrix to be matrix multiplied

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> mat1 = torch.randn(2, 3)
        >>> mat2 = torch.randn(3, 3)
        >>> torch.mm(mat1, mat2)
        tensor([[ 0.4851,  0.5037, -0.3633],
                [-0.0760, -3.6705,  2.4784]])
    """
    ...
@overload
def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
    r"""
    mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)

    Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`, i.e. a value which appears most often
    in that row, and ``indices`` is the index location of each mode value found.

    By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

    If :attr:`keepdim` is ``True``, the output tensors are of the same size as
    :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
    in the output tensors having 1 fewer dimension than :attr:`input`.

    .. note:: This function is not defined for ``torch.cuda.Tensor`` yet.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (values, indices)

    Example::

        >>> b = torch.tensor(
        ...     [[0, 0, 0, 2, 0, 0, 2],
        ...      [0, 3, 0, 0, 2, 0, 1],
        ...      [2, 2, 2, 0, 0, 0, 3],
        ...      [2, 2, 3, 0, 1, 1, 0],
        ...      [1, 1, 0, 0, 2, 0, 2]])
        >>> torch.mode(b, 0)
        torch.return_types.mode(
        values=tensor([0, 2, 0, 0, 0, 0, 2]),
        indices=tensor([1, 3, 4, 4, 2, 4, 4]))
    """
    ...
@overload
def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
    r"""
    mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)

    Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
    value of each row of the :attr:`input` tensor in the given dimension
    :attr:`dim`, i.e. a value which appears most often
    in that row, and ``indices`` is the index location of each mode value found.

    By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

    If :attr:`keepdim` is ``True``, the output tensors are of the same size as
    :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
    in the output tensors having 1 fewer dimension than :attr:`input`.

    .. note:: This function is not defined for ``torch.cuda.Tensor`` yet.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out (tuple, optional): the result tuple of two output tensors (values, indices)

    Example::

        >>> b = torch.tensor(
        ...     [[0, 0, 0, 2, 0, 0, 2],
        ...      [0, 3, 0, 0, 2, 0, 1],
        ...      [2, 2, 2, 0, 0, 0, 3],
        ...      [2, 2, 3, 0, 1, 1, 0],
        ...      [1, 1, 0, 0, 2, 0, 2]])
        >>> torch.mode(b, 0)
        torch.return_types.mode(
        values=tensor([0, 2, 0, 0, 0, 0, 2]),
        indices=tensor([1, 3, 4, 4, 2, 4, 4]))
    """
    ...
@overload
def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor:
    r"""
    moveaxis(input, source, destination) -> Tensor

    Alias for :func:`torch.movedim`.

    This function is equivalent to NumPy's moveaxis function.

    Examples::

        >>> t = torch.randn(3, 2, 1)
        >>> t
        tensor([[[-0.3362],
                 [-0.8437]],

                [[-0.9627],
                 [ 0.1727]],

                [[ 0.5173],
                 [-0.1398]]])
        >>> torch.moveaxis(t, 1, 0).shape
        torch.Size([2, 3, 1])
        >>> torch.moveaxis(t, 1, 0)
        tensor([[[-0.3362],
                 [-0.9627],
                 [ 0.5173]],

                [[-0.8437],
                 [ 0.1727],
                 [-0.1398]]])
        >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
        torch.Size([2, 1, 3])
        >>> torch.moveaxis(t, (1, 2), (0, 1))
        tensor([[[-0.3362, -0.9627,  0.5173]],

                [[-0.8437,  0.1727, -0.1398]]])
    """
    ...
@overload
def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor:
    r"""
    moveaxis(input, source, destination) -> Tensor

    Alias for :func:`torch.movedim`.

    This function is equivalent to NumPy's moveaxis function.

    Examples::

        >>> t = torch.randn(3, 2, 1)
        >>> t
        tensor([[[-0.3362],
                 [-0.8437]],

                [[-0.9627],
                 [ 0.1727]],

                [[ 0.5173],
                 [-0.1398]]])
        >>> torch.moveaxis(t, 1, 0).shape
        torch.Size([2, 3, 1])
        >>> torch.moveaxis(t, 1, 0)
        tensor([[[-0.3362],
                 [-0.9627],
                 [ 0.5173]],

                [[-0.8437],
                 [ 0.1727],
                 [-0.1398]]])
        >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
        torch.Size([2, 1, 3])
        >>> torch.moveaxis(t, (1, 2), (0, 1))
        tensor([[[-0.3362, -0.9627,  0.5173]],

                [[-0.8437,  0.1727, -0.1398]]])
    """
    ...
@overload
def movedim(input: Tensor, source: _int, destination: _int) -> Tensor:
    r"""
    movedim(input, source, destination) -> Tensor

    Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
    to the position(s) in :attr:`destination`.

    Other dimensions of :attr:`input` that are not explicitly moved remain in
    their original order and appear at the positions not specified in :attr:`destination`.

    Args:
        input (Tensor): the input tensor.
        source (int or tuple of ints): Original positions of the dims to move. These must be unique.
        destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.

    Examples::

        >>> t = torch.randn(3,2,1)
        >>> t
        tensor([[[-0.3362],
                 [-0.8437]],

                [[-0.9627],
                 [ 0.1727]],

                [[ 0.5173],
                 [-0.1398]]])
        >>> torch.movedim(t, 1, 0).shape
        torch.Size([2, 3, 1])
        >>> torch.movedim(t, 1, 0)
        tensor([[[-0.3362],
                 [-0.9627],
                 [ 0.5173]],

                [[-0.8437],
                 [ 0.1727],
                 [-0.1398]]])
        >>> torch.movedim(t, (1, 2), (0, 1)).shape
        torch.Size([2, 1, 3])
        >>> torch.movedim(t, (1, 2), (0, 1))
        tensor([[[-0.3362, -0.9627,  0.5173]],

                [[-0.8437,  0.1727, -0.1398]]])
    """
    ...
@overload
def movedim(input: Tensor, source: _size, destination: _size) -> Tensor:
    r"""
    movedim(input, source, destination) -> Tensor

    Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
    to the position(s) in :attr:`destination`.

    Other dimensions of :attr:`input` that are not explicitly moved remain in
    their original order and appear at the positions not specified in :attr:`destination`.

    Args:
        input (Tensor): the input tensor.
        source (int or tuple of ints): Original positions of the dims to move. These must be unique.
        destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.

    Examples::

        >>> t = torch.randn(3,2,1)
        >>> t
        tensor([[[-0.3362],
                 [-0.8437]],

                [[-0.9627],
                 [ 0.1727]],

                [[ 0.5173],
                 [-0.1398]]])
        >>> torch.movedim(t, 1, 0).shape
        torch.Size([2, 3, 1])
        >>> torch.movedim(t, 1, 0)
        tensor([[[-0.3362],
                 [-0.9627],
                 [ 0.5173]],

                [[-0.8437],
                 [ 0.1727],
                 [-0.1398]]])
        >>> torch.movedim(t, (1, 2), (0, 1)).shape
        torch.Size([2, 1, 3])
        >>> torch.movedim(t, (1, 2), (0, 1))
        tensor([[[-0.3362, -0.9627,  0.5173]],

                [[-0.8437,  0.1727, -0.1398]]])
    """
    ...
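# A hedged sketch (editorial addition): one everyday use of ``movedim`` is
# memory-layout conversion, e.g. channels-first (N, C, H, W) to channels-last
# (N, H, W, C); the sizes here are purely illustrative:
#
#     >>> x = torch.randn(8, 3, 32, 32)   # N, C, H, W
#     >>> torch.movedim(x, 1, -1).shape   # move the channel dim to the end
#     torch.Size([8, 32, 32, 3])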
def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    msort(input, *, out=None) -> Tensor

    Sorts the elements of the :attr:`input` tensor along its first dimension
    in ascending order by value.

    .. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
              See also :func:`torch.sort`.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> t = torch.randn(3, 4)
        >>> t
        tensor([[-0.1321,  0.4370, -1.2631, -1.1289],
                [-2.0527, -1.1250,  0.2275,  0.3077],
                [-0.0881, -0.1259, -0.5495,  1.0284]])
        >>> torch.msort(t)
        tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
                [-0.1321, -0.1259, -0.5495,  0.3077],
                [-0.0881,  0.4370,  0.2275,  1.0284]])
    """
    ...
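# A hedged sketch (editorial addition) checking the equivalence stated in the
# note above:
#
#     >>> t = torch.randn(3, 4)
#     >>> torch.equal(torch.msort(t), torch.sort(t, dim=0)[0])
#     True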
def mul(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    mul(input, other, *, out=None) -> Tensor

    Multiplies :attr:`input` by :attr:`other`.

    .. math::
        \text{out}_i = \text{input}_i \times \text{other}_i

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

    Args:
        input (Tensor): the input tensor.
        other (Tensor or Number): the tensor or number to multiply input by.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Examples::

        >>> a = torch.randn(3)
        >>> a
        tensor([ 0.2015, -0.4255,  2.6087])
        >>> torch.mul(a, 100)
        tensor([  20.1494,  -42.5491,  260.8663])

        >>> b = torch.randn(4, 1)
        >>> b
        tensor([[ 1.1207],
                [-0.3137],
                [ 0.0700],
                [ 0.8378]])
        >>> c = torch.randn(1, 4)
        >>> c
        tensor([[ 0.5146,  0.1216, -0.5244,  2.2382]])
        >>> torch.mul(b, c)
        tensor([[ 0.5767,  0.1363, -0.5877,  2.5083],
                [-0.1614, -0.0382,  0.1645, -0.7021],
                [ 0.0360,  0.0085, -0.0367,  0.1567],
                [ 0.4312,  0.1019, -0.4394,  1.8753]])
    """
    ...
def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor

    Returns a tensor where each row contains :attr:`num_samples` indices sampled
    from the multinomial (a stricter definition would be multivariate,
    refer to :class:`torch.distributions.multinomial.Multinomial` for more details)
    probability distribution located in the corresponding row
    of tensor :attr:`input`.

    .. note::
        The rows of :attr:`input` do not need to sum to one (in which case we use
        the values as weights), but must be non-negative, finite and have
        a non-zero sum.

    Indices are ordered from left to right according to when each was sampled
    (first samples are placed in first column).

    If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.

    If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
    :math:`(m \times \text{num\_samples})`.

    If replacement is ``True``, samples are drawn with replacement.

    If not, they are drawn without replacement, which means that when a
    sample index is drawn for a row, it cannot be drawn again for that row.

    .. note::
        When drawn without replacement, :attr:`num_samples` must be lower than
        number of non-zero elements in :attr:`input` (or the min number of non-zero
        elements in each row of :attr:`input` if it is a matrix).

    Args:
        input (Tensor): the input tensor containing probabilities
        num_samples (int): number of samples to draw
        replacement (bool, optional): whether to draw with replacement or not

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)  # create a tensor of weights
        >>> torch.multinomial(weights, 2)
        tensor([1, 2])
        >>> torch.multinomial(weights, 5)  # ERROR!
        RuntimeError: cannot sample n_sample > prob_dist.size(-1) samples without replacement
        >>> torch.multinomial(weights, 4, replacement=True)
        tensor([ 2, 1, 1, 1])
    """
    ...
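# A hedged sketch (editorial addition): the ``generator`` keyword makes the
# sampling reproducible without touching the global RNG state:
#
#     >>> weights = torch.tensor([0., 10., 3., 0.])
#     >>> g = torch.Generator().manual_seed(0)
#     >>> first = torch.multinomial(weights, 2, generator=g)
#     >>> _ = g.manual_seed(0)
#     >>> second = torch.multinomial(weights, 2, generator=g)
#     >>> torch.equal(first, second)
#     True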
@overload
def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    multiply(input, other, *, out=None)

    Alias for :func:`torch.mul`.
    """
    ...
@overload
def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor:
    r"""
    multiply(input, other, *, out=None)

    Alias for :func:`torch.mul`.
    """
    ...
def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    mv(input, vec, *, out=None) -> Tensor

    Performs a matrix-vector product of the matrix :attr:`input` and the vector
    :attr:`vec`.

    If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
    size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.

    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

    Args:
        input (Tensor): matrix to be multiplied
        vec (Tensor): vector to be multiplied

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> mat = torch.randn(2, 3)
        >>> vec = torch.randn(3)
        >>> torch.mv(mat, vec)
        tensor([ 1.0404, -0.6361])
    """
    ...
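# A hedged sketch (editorial addition): since ``mv`` does not broadcast, a
# batched matrix-vector product can instead go through ``matmul``, which
# broadcasts the vector across the batch of matrices:
#
#     >>> mats = torch.randn(10, 2, 3)
#     >>> vec = torch.randn(3)
#     >>> (mats @ vec).shape
#     torch.Size([10, 2])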
def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    mvlgamma(input, p, *, out=None) -> Tensor

    Alias for :func:`torch.special.multigammaln`.
    """
    ...
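# A hedged sketch (editorial addition): the multivariate log-gamma is only
# defined where every element of ``input`` is greater than ``(p - 1) / 2``;
# values at or below that bound raise an error:
#
#     >>> a = torch.tensor([1.5, 2.0, 3.0])
#     >>> out = torch.mvlgamma(a, p=2)  # fine: all elements > 0.5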
def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor

    Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
    with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
    By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
    greatest finite value representable by :attr:`input`'s dtype, and negative infinity
    is replaced with the least finite value representable by :attr:`input`'s dtype.

    Args:
        input (Tensor): the input tensor.
        nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
        posinf (Number, optional): if a Number, the value to replace positive infinity values with.
            If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
            Default is None.
        neginf (Number, optional): if a Number, the value to replace negative infinity values with.
            If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
            Default is None.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
        >>> torch.nan_to_num(x)
        tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
        >>> torch.nan_to_num(x, nan=2.0)
        tensor([ 2.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
        >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
        tensor([ 2.0000e+00,  1.0000e+00, -3.4028e+38,  3.1400e+00])
    """
    ...
def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ...
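# A hedged sketch (editorial addition): per the usual trailing-underscore
# convention, ``nan_to_num_`` applies :func:`torch.nan_to_num` in place and
# returns its (modified) input:
#
#     >>> x = torch.tensor([float('nan'), float('inf')])
#     >>> torch.nan_to_num_(x, nan=0.0, posinf=1.0)
#     tensor([0., 1.])
#     >>> x  # modified in place
#     tensor([0., 1.])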
def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor

    Computes the mean of all `non-NaN` elements along the specified dimensions.

    This function is identical to :func:`torch.mean` when there are no `NaN` values
    in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
    propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
    `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.
        out (Tensor, optional): the output tensor.

    .. seealso::

        :func:`torch.mean` computes the mean value, propagating `NaN`.

    Example::

        >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
        >>> x.mean()
        tensor(nan)
        >>> x.nanmean()
        tensor(1.8000)
        >>> x.mean(dim=0)
        tensor([   nan, 1.5000, 2.5000])
        >>> x.nanmean(dim=0)
        tensor([1.0000, 1.5000, 2.5000])

        >>> # If all elements in the reduced dimensions are NaN then the result is NaN
        >>> torch.tensor([torch.nan]).nanmean()
        tensor(nan)
    """
    ...
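# A hedged sketch (editorial addition): the masking identity quoted in the
# docstring only covers full reductions; along a dimension, ``nanmean`` can be
# emulated with ``nansum`` divided by the count of non-NaN entries:
#
#     >>> x = torch.tensor([[float('nan'), 1., 2.], [1., 2., 3.]])
#     >>> manual = torch.nansum(x, dim=0) / (~x.isnan()).sum(dim=0)
#     >>> torch.allclose(manual, x.nanmean(dim=0))
#     True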
@overload
def nanmedian(input: Tensor) -> Tensor:
    r"""
    nanmedian(input) -> Tensor

    Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.

    This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
    When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
    while this function will return the median of the non-``NaN`` elements in :attr:`input`.
    If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.tensor([1, float('nan'), 3, 2])
        >>> a.median()
        tensor(nan)
        >>> a.nanmedian()
        tensor(2.)

    .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
    in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
    found in the dimension :attr:`dim`.

    This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
    one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
    median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
            tensor, which must have dtype long, with their indices in the dimension
            :attr:`dim` of :attr:`input`.

    Example::

        >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
        >>> a
        tensor([[2., 3., 1.],
                [nan, 1., nan]])
        >>> a.median(0)
        torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
        >>> a.nanmedian(0)
        torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
    """
    ...
@overload
def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian:
    r"""
    nanmedian(input) -> Tensor

    Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.

    This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
    When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
    while this function will return the median of the non-``NaN`` elements in :attr:`input`.
    If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.tensor([1, float('nan'), 3, 2])
        >>> a.median()
        tensor(nan)
        >>> a.nanmedian()
        tensor(2.)

    .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
    in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
    found in the dimension :attr:`dim`.

    This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
    one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
    median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
            tensor, which must have dtype long, with their indices in the dimension
            :attr:`dim` of :attr:`input`.

    Example::

        >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
        >>> a
        tensor([[2., 3., 1.],
                [nan, 1., nan]])
        >>> a.median(0)
        torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
        >>> a.nanmedian(0)
        torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
    """
    ...
@overload
def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian:
    r"""
    nanmedian(input) -> Tensor

    Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.

    This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
    When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
    while this function will return the median of the non-``NaN`` elements in :attr:`input`.
    If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.tensor([1, float('nan'), 3, 2])
        >>> a.median()
        tensor(nan)
        >>> a.nanmedian()
        tensor(2.)

    .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
       :noindex:

    Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
    in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
    found in the dimension :attr:`dim`.

    This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
    one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
    median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
            tensor, which must have dtype long, with their indices in the dimension
            :attr:`dim` of :attr:`input`.

    Example::

        >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
        >>> a
        tensor([[2., 3., 1.],
                [nan, 1., nan]])
        >>> a.median(0)
        torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
        >>> a.nanmedian(0)
        torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
    """
    ...
@overload
def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
    r"""
    nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

    This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
    computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
    not exist. If all values in a reduced row are ``NaN`` then the quantiles for
    that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.

    Args:
        input (Tensor): the input tensor.
        q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword arguments:
        interpolation (str): interpolation method to use when the desired quantile lies between two data points.
            Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
            Default is ``linear``.
        out (Tensor, optional): the output tensor.

    Example::

        >>> t = torch.tensor([float('nan'), 1, 2])
        >>> t.quantile(0.5)
        tensor(nan)
        >>> t.nanquantile(0.5)
        tensor(1.5000)
        >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
        >>> t
        tensor([[nan, nan],
                [1., 2.]])
        >>> t.nanquantile(0.5, dim=0)
        tensor([1., 2.])
        >>> t.nanquantile(0.5, dim=1)
        tensor([   nan, 1.5000])
    """
    ...
@overload
def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
    r"""
    nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

    This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
    computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
    not exist. If all values in a reduced row are ``NaN`` then the quantiles for
    that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.

    Args:
        input (Tensor): the input tensor.
        q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword arguments:
        interpolation (str): interpolation method to use when the desired quantile lies between two data points.
            Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
            Default is ``linear``.
        out (Tensor, optional): the output tensor.

    Example::

        >>> t = torch.tensor([float('nan'), 1, 2])
        >>> t.quantile(0.5)
        tensor(nan)
        >>> t.nanquantile(0.5)
        tensor(1.5000)
        >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
        >>> t
        tensor([[nan, nan],
                [1., 2.]])
        >>> t.nanquantile(0.5, dim=0)
        tensor([1., 2.])
        >>> t.nanquantile(0.5, dim=1)
        tensor([   nan, 1.5000])
    """
    ...
def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    nansum(input, *, dtype=None) -> Tensor

    Returns the sum of all elements, treating Not a Numbers (NaNs) as zero.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.tensor([1., 2., float('nan'), 4.])
        >>> torch.nansum(a)
        tensor(7.)

    .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the sum of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero.
    If :attr:`dim` is a list of dimensions, reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> torch.nansum(torch.tensor([1., float("nan")]))
        tensor(1.)
        >>> a = torch.tensor([[1, 2], [3., float("nan")]])
        >>> torch.nansum(a)
        tensor(6.)
        >>> torch.nansum(a, dim=0)
        tensor([4., 2.])
        >>> torch.nansum(a, dim=1)
        tensor([3., 3.])
    """
    ...
@overload
def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor:
    r"""
    narrow(input, dim, start, length) -> Tensor

    Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
    dimension :attr:`dim` of the returned tensor covers the elements of :attr:`input`
    from :attr:`start` to ``start + length``. The
    returned tensor and :attr:`input` tensor share the same underlying storage.

    Args:
        input (Tensor): the tensor to narrow
        dim (int): the dimension along which to narrow
        start (int or Tensor): index of the element to start the narrowed dimension
            from. Can be negative, which means indexing from the end of `dim`. If
            `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
        length (int): length of the narrowed dimension, must be weakly positive

    Example::

        >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> torch.narrow(x, 0, 0, 2)
        tensor([[ 1, 2, 3],
                [ 4, 5, 6]])
        >>> torch.narrow(x, 1, 1, 2)
        tensor([[ 2, 3],
                [ 5, 6],
                [ 8, 9]])
        >>> torch.narrow(x, -1, torch.tensor(-1), 1)
        tensor([[3],
                [6],
                [9]])
    """
    ...
@overload
def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
    r"""
    narrow(input, dim, start, length) -> Tensor

    Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
    dimension :attr:`dim` of the returned tensor covers the elements of :attr:`input`
    from :attr:`start` to ``start + length``. The
    returned tensor and :attr:`input` tensor share the same underlying storage.

    Args:
        input (Tensor): the tensor to narrow
        dim (int): the dimension along which to narrow
        start (int or Tensor): index of the element to start the narrowed dimension
            from. Can be negative, which means indexing from the end of `dim`. If
            `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
        length (int): length of the narrowed dimension, must be weakly positive

    Example::

        >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> torch.narrow(x, 0, 0, 2)
        tensor([[ 1, 2, 3],
                [ 4, 5, 6]])
        >>> torch.narrow(x, 1, 1, 2)
        tensor([[ 2, 3],
                [ 5, 6],
                [ 8, 9]])
        >>> torch.narrow(x, -1, torch.tensor(-1), 1)
        tensor([[3],
                [6],
                [9]])
    """
    ...
def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    narrow_copy(input, dim, start, length, *, out=None) -> Tensor

    Same as :meth:`Tensor.narrow` except this returns a copy rather
    than shared storage. This is primarily for sparse tensors, which
    do not have a shared-storage narrow method.

    Args:
        input (Tensor): the tensor to narrow
        dim (int): the dimension along which to narrow
        start (int): index of the element to start the narrowed dimension from. Can
            be negative, which means indexing from the end of `dim`
        length (int): length of the narrowed dimension, must be weakly positive

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> torch.narrow_copy(x, 0, 0, 2)
        tensor([[ 1, 2, 3],
                [ 4, 5, 6]])
        >>> torch.narrow_copy(x, 1, 1, 2)
        tensor([[ 2, 3],
                [ 5, 6],
                [ 8, 9]])
        >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
        >>> torch.narrow_copy(s, 0, 0, 1)
        tensor(indices=tensor([[0, 0],
                               [0, 1]]),
               values=tensor([[[0, 1],
                               [2, 3]],

                              [[4, 5],
                               [6, 7]]]),
               size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)

    .. seealso::

        :func:`torch.narrow` for a non-copy variant
    """
    ...
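# A hedged sketch (editorial addition): unlike ``narrow``, the copy does not
# alias the input's storage, so writing into it leaves the input untouched:
#
#     >>> x = torch.tensor([[1, 2, 3], [4, 5, 6]])
#     >>> view = torch.narrow(x, 0, 0, 1)
#     >>> copy = torch.narrow_copy(x, 0, 0, 1)
#     >>> view[0, 0] = 100
#     >>> copy[0, 0] = -1
#     >>> x[0, 0]  # only the write through the view is visible
#     tensor(100)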
def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ...
def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
@overload
def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
@overload
def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ...
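# A hedged note (editorial addition): the ``native_*`` entries above are the raw
# ATen kernels behind the public ``torch.nn.functional`` ops; judging from the
# signatures they return the result together with the saved statistics (e.g.
# ``native_layer_norm`` appears to return ``(output, mean, rstd)``). Prefer the
# public wrappers in user code; a minimal sketch, assuming an eager build:
#
#     >>> x = torch.randn(4, 8)
#     >>> out, mean, rstd = torch.native_layer_norm(x, (8,), None, None, 1e-5)
#     >>> out.shape
#     torch.Size([4, 8])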
@overload
def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    ne(input, other, *, out=None) -> Tensor

    Computes :math:`\text{input} \neq \text{other}` element-wise.

    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.

    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere

    Example::

        >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[False, True], [True, False]])
    """
    ...
@overload
def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    ne(input, other, *, out=None) -> Tensor

    Computes :math:`\text{input} \neq \text{other}` element-wise.

    The second argument can be a number or a tensor whose shape is
    :ref:`broadcastable <broadcasting-semantics>` with the first argument.

    Args:
        input (Tensor): the tensor to compare
        other (Tensor or float): the tensor or value to compare

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere

    Example::

        >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
        tensor([[False, True], [True, False]])
    """
    ...
def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    neg(input, *, out=None) -> Tensor

    Returns a new tensor with the negative of the elements of :attr:`input`.

    .. math::
        \text{out} = -1 \times \text{input}

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(5)
        >>> a
        tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
        >>> torch.neg(a)
        tensor([-0.0090,  0.2262,  0.0682,  0.2866, -0.3940])
    """
    ...
def neg_(input: Tensor) -> Tensor: ...
def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    negative(input, *, out=None) -> Tensor

    Alias for :func:`torch.neg`.
    """
    ...
def negative_(input: Tensor) -> Tensor: ...
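# A hedged sketch (editorial addition): ``neg_`` and its alias ``negative_``
# follow the trailing-underscore convention, negating ``input`` in place and
# returning it:
#
#     >>> a = torch.tensor([1., -2.])
#     >>> torch.neg_(a)
#     tensor([-1.,  2.])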
def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    nextafter(input, other, *, out=None) -> Tensor

    Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.

    The shapes of ``input`` and ``other`` must be
    :ref:`broadcastable <broadcasting-semantics>`.

    Args:
        input (Tensor): the first input tensor
        other (Tensor): the second input tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> eps = torch.finfo(torch.float32).eps
        >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
        tensor([True, True])
    """
    ...
@overload
def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors

    .. note::
        :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
        2-D tensor where each row is the index for a nonzero value.

        :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
        index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
        gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
        contains nonzero indices for a certain dimension.

        See below for more details on the two behaviors.

        When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
        host-device synchronization.

    **When** :attr:`as_tuple` **is** ``False`` **(default)**:

    Returns a tensor containing the indices of all non-zero elements of
    :attr:`input`. Each row in the result contains the indices of a non-zero
    element in :attr:`input`. The result is sorted lexicographically, with
    the last index changing the fastest (C-style).

    If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
    :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
    non-zero elements in the :attr:`input` tensor.

    **When** :attr:`as_tuple` **is** ``True``:

    Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
    each containing the indices (in that dimension) of all non-zero elements of
    :attr:`input`.

    If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
    tensors of size :math:`z`, where :math:`z` is the total number of
    non-zero elements in the :attr:`input` tensor.

    As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
    value, it is treated as a one-dimensional tensor with one element.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (LongTensor, optional): the output tensor containing indices

    Returns:
        LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
        tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
        each dimension, containing the indices of each nonzero element along that
        dimension.

    Example::

        >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
        tensor([[ 0],
                [ 1],
                [ 2],
                [ 4]])
        >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
        ...                             [0.0, 0.4, 0.0, 0.0],
        ...                             [0.0, 0.0, 1.2, 0.0],
        ...                             [0.0, 0.0, 0.0,-0.4]]))
        tensor([[ 0, 0],
                [ 1, 1],
                [ 2, 2],
                [ 3, 3]])
        >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
        (tensor([0, 1, 2, 4]),)
        >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
        ...                             [0.0, 0.4, 0.0, 0.0],
        ...                             [0.0, 0.0, 1.2, 0.0],
        ...                             [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
        (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
        >>> torch.nonzero(torch.tensor(5), as_tuple=True)
        (tensor([0]),)
    """
    ...
@overload
def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]:
    r"""
    nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors

    .. note::
        :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
        2-D tensor where each row is the index for a nonzero value.

        :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
        index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
        gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
        contains nonzero indices for a certain dimension.

        See below for more details on the two behaviors.

        When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
        host-device synchronization.

    **When** :attr:`as_tuple` **is** ``False`` **(default)**:

    Returns a tensor containing the indices of all non-zero elements of
    :attr:`input`. Each row in the result contains the indices of a non-zero
    element in :attr:`input`. The result is sorted lexicographically, with
    the last index changing the fastest (C-style).

    If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
    :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
    non-zero elements in the :attr:`input` tensor.

    **When** :attr:`as_tuple` **is** ``True``:

    Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
    each containing the indices (in that dimension) of all non-zero elements of
    :attr:`input`.

    If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
    tensors of size :math:`z`, where :math:`z` is the total number of
    non-zero elements in the :attr:`input` tensor.

    As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
    value, it is treated as a one-dimensional tensor with one element.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (LongTensor, optional): the output tensor containing indices

    Returns:
        LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
        tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
        each dimension, containing the indices of each nonzero element along that
        dimension.

    Example::

        >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
        tensor([[ 0],
                [ 1],
                [ 2],
                [ 4]])
        >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
        ...                             [0.0, 0.4, 0.0, 0.0],
        ...                             [0.0, 0.0, 1.2, 0.0],
        ...                             [0.0, 0.0, 0.0,-0.4]]))
        tensor([[ 0, 0],
                [ 1, 1],
                [ 2, 2],
                [ 3, 3]])
        >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
        (tensor([0, 1, 2, 4]),)
        >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
        ...                             [0.0, 0.4, 0.0, 0.0],
        ...                             [0.0, 0.0, 1.2, 0.0],
        ...                             [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
        (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
        >>> torch.nonzero(torch.tensor(5), as_tuple=True)
        (tensor([0]),)
    """
    ...
def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ...
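# A hedged note (editorial addition): judging from the signature,
# ``nonzero_static`` is the fixed-shape variant of :func:`torch.nonzero` -- the
# result always has ``size`` rows, padded with ``fill_value`` when fewer nonzero
# elements exist, which keeps output shapes static for export/compilation:
#
#     >>> x = torch.tensor([0, 1, 0, 2])
#     >>> torch.nonzero_static(x, size=4, fill_value=-1)
#     tensor([[ 1],
#             [ 3],
#             [-1],
#             [-1]])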
def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ...
@overload
def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    normal(mean, std, *, generator=None, out=None) -> Tensor

    Returns a tensor of random numbers drawn from separate normal distributions
    whose mean and standard deviation are given.

    The :attr:`mean` is a tensor with the mean of
    each output element's normal distribution.

    The :attr:`std` is a tensor with the standard deviation of
    each output element's normal distribution.

    The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
    total number of elements in each tensor needs to be the same.

    .. note:: When the shapes do not match, the shape of :attr:`mean`
              is used as the shape for the returned output tensor.

    .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
              its device with the CPU.

    Args:
        mean (Tensor): the tensor of per-element means
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
        tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
                  8.0505,   8.1408,   9.0563,  10.0566])

    .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means are shared among all drawn
    elements.

    Args:
        mean (float, optional): the mean for all distributions
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
        tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

    .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the standard deviations are shared among
    all drawn elements.

    Args:
        mean (Tensor): the tensor of per-element means
        std (float, optional): the standard deviation for all distributions

    Keyword args:
        out (Tensor, optional): the output tensor

    Example::

        >>> torch.normal(mean=torch.arange(1., 6.))
        tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

    .. function:: normal(mean, std, size, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means and standard deviations are shared
    among all drawn elements. The resulting tensor has size given by :attr:`size`.

    Args:
        mean (float): the mean for all distributions
        std (float): the standard deviation for all distributions
        size (int...): a sequence of integers defining the shape of the output tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(2, 3, size=(1, 4))
        tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
    """
    ...
@overload
def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    normal(mean, std, *, generator=None, out=None) -> Tensor

    Returns a tensor of random numbers drawn from separate normal distributions
    whose mean and standard deviation are given.

    The :attr:`mean` is a tensor with the mean of
    each output element's normal distribution.

    The :attr:`std` is a tensor with the standard deviation of
    each output element's normal distribution.

    The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
    total number of elements in each tensor needs to be the same.

    .. note:: When the shapes do not match, the shape of :attr:`mean`
              is used as the shape for the returned output tensor.

    .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
              its device with the CPU.

    Args:
        mean (Tensor): the tensor of per-element means
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
        tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
                  8.0505,   8.1408,   9.0563,  10.0566])

    .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means are shared among all drawn
    elements.

    Args:
        mean (float, optional): the mean for all distributions
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
        tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

    .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the standard deviations are shared among
    all drawn elements.

    Args:
        mean (Tensor): the tensor of per-element means
        std (float, optional): the standard deviation for all distributions

    Keyword args:
        out (Tensor, optional): the output tensor

    Example::

        >>> torch.normal(mean=torch.arange(1., 6.))
        tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

    .. function:: normal(mean, std, size, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means and standard deviations are shared
    among all drawn elements. The resulting tensor has size given by :attr:`size`.

    Args:
        mean (float): the mean for all distributions
        std (float): the standard deviation for all distributions
        size (int...): a sequence of integers defining the shape of the output tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(2, 3, size=(1, 4))
        tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
    """
    ...
@overload
def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    normal(mean, std, *, generator=None, out=None) -> Tensor

    Returns a tensor of random numbers drawn from separate normal distributions
    whose mean and standard deviation are given.

    The :attr:`mean` is a tensor with the mean of
    each output element's normal distribution.

    The :attr:`std` is a tensor with the standard deviation of
    each output element's normal distribution.

    The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
    total number of elements in each tensor needs to be the same.

    .. note:: When the shapes do not match, the shape of :attr:`mean`
              is used as the shape for the returned output tensor.

    .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
              its device with the CPU.

    Args:
        mean (Tensor): the tensor of per-element means
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
        tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
                  8.0505,   8.1408,   9.0563,  10.0566])

    .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means are shared among all drawn
    elements.

    Args:
        mean (float, optional): the mean for all distributions
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
        tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

    .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the standard deviations are shared among
    all drawn elements.

    Args:
        mean (Tensor): the tensor of per-element means
        std (float, optional): the standard deviation for all distributions

    Keyword args:
        out (Tensor, optional): the output tensor

    Example::

        >>> torch.normal(mean=torch.arange(1., 6.))
        tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

    .. function:: normal(mean, std, size, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means and standard deviations are shared
    among all drawn elements. The resulting tensor has size given by :attr:`size`.

    Args:
        mean (float): the mean for all distributions
        std (float): the standard deviation for all distributions
        size (int...): a sequence of integers defining the shape of the output tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(2, 3, size=(1, 4))
        tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
    """
    ...
@overload
def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    normal(mean, std, *, generator=None, out=None) -> Tensor

    Returns a tensor of random numbers drawn from separate normal distributions
    whose mean and standard deviation are given.

    The :attr:`mean` is a tensor with the mean of
    each output element's normal distribution.

    The :attr:`std` is a tensor with the standard deviation of
    each output element's normal distribution.

    The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
    total number of elements in each tensor needs to be the same.

    .. note:: When the shapes do not match, the shape of :attr:`mean`
              is used as the shape for the returned output tensor.

    .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
              its device with the CPU.

    Args:
        mean (Tensor): the tensor of per-element means
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
        tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
                  8.0505,   8.1408,   9.0563,  10.0566])

    .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means are shared among all drawn
    elements.

    Args:
        mean (float, optional): the mean for all distributions
        std (Tensor): the tensor of per-element standard deviations

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
        tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

    .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the standard deviations are shared among
    all drawn elements.

    Args:
        mean (Tensor): the tensor of per-element means
        std (float, optional): the standard deviation for all distributions

    Keyword args:
        out (Tensor, optional): the output tensor

    Example::

        >>> torch.normal(mean=torch.arange(1., 6.))
        tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

    .. function:: normal(mean, std, size, *, out=None) -> Tensor
       :noindex:

    Similar to the function above, but the means and standard deviations are shared
    among all drawn elements. The resulting tensor has size given by :attr:`size`.

    Args:
        mean (float): the mean for all distributions
        std (float): the standard deviation for all distributions
        size (int...): a sequence of integers defining the shape of the output tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.normal(2, 3, size=(1, 4))
        tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
    """
    ...
@overload
def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    not_equal(input, other, *, out=None) -> Tensor

    Alias for :func:`torch.ne`.
    """
    ...
@overload
def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    not_equal(input, other, *, out=None) -> Tensor

    Alias for :func:`torch.ne`.
    """
    ...
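# Usage sketch: not_equal is a plain alias of torch.ne, comparing elementwise
# (with broadcasting) and returning a bool tensor.
#
#   >>> torch.not_equal(torch.tensor([1, 2, 3]), torch.tensor([1, 0, 3]))
#   tensor([False,  True, False])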
@overload
def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
@overload
def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
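# Usage sketch: the nuclear norm is the sum of a matrix's singular values, so
# for a 2-D input it should agree with summing torch.linalg.svdvals.
#
#   >>> a = torch.randn(4, 4)
#   >>> torch.allclose(torch.nuclear_norm(a), torch.linalg.svdvals(a).sum())
#   True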
def numel(self: Tensor) -> _int:
    r"""
    numel(input) -> int

    Returns the total number of elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> a = torch.randn(1, 2, 3, 4, 5)
        >>> torch.numel(a)
        120
        >>> a = torch.zeros(4,4)
        >>> torch.numel(a)
        16
    """
    ...
@overload
def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `1`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.ones(2, 3)
        tensor([[ 1., 1., 1.],
                [ 1., 1., 1.]])
        >>> torch.ones(5)
        tensor([ 1., 1., 1., 1., 1.])
    """
    ...
@overload
def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `1`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.ones(2, 3)
        tensor([[ 1., 1., 1.],
                [ 1., 1., 1.]])
        >>> torch.ones(5)
        tensor([ 1., 1., 1., 1., 1.])
    """
    ...
@overload
def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `1`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.ones(2, 3)
        tensor([[ 1., 1., 1.],
                [ 1., 1., 1.]])
        >>> torch.ones(5)
        tensor([ 1., 1., 1., 1., 1.])
    """
    ...
@overload
def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `1`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword arguments:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.ones(2, 3)
        tensor([[ 1., 1., 1.],
                [ 1., 1., 1.]])
        >>> torch.ones(5)
        tensor([ 1., 1., 1., 1., 1.])
    """
    ...
def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor filled with the scalar value `1`, with the same size as
    :attr:`input`. ``torch.ones_like(input)`` is equivalent to
    ``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

    .. warning::
        As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
        the old ``torch.ones_like(input, out=output)`` is equivalent to
        ``torch.ones(input.size(), out=output)``.

    Args:
        input (Tensor): the size of :attr:`input` will determine the size of the output tensor.

    Keyword arguments:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.

    Example::

        >>> input = torch.empty(2, 3)
        >>> torch.ones_like(input)
        tensor([[ 1., 1., 1.],
                [ 1., 1., 1.]])
    """
    ...
def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    orgqr(input, tau) -> Tensor

    Alias for :func:`torch.linalg.householder_product`.
    """
    ...
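# Usage sketch: together with torch.geqrf, orgqr materializes the Q factor
# from its Householder representation; the columns come out orthonormal.
#
#   >>> A = torch.randn(4, 3)
#   >>> h, tau = torch.geqrf(A)
#   >>> Q = torch.orgqr(h, tau)
#   >>> torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-5)
#   True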
def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor

    Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.

    Multiplies an :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
    where `Q` is represented using Householder reflectors `(input, tau)`.
    See `Representation of Orthogonal or Unitary Matrices`_ for further details.

    If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
    When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
    It has size :math:`n \times n` otherwise.
    If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.

    Supports inputs of float, double, cfloat and cdouble dtypes.
    Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.

    .. seealso::
        :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
        from the QR decomposition.

    .. note::
        This function supports backward but it is only fast when ``(input, tau)`` do not require gradients
        and/or ``tau.size(-1)`` is very small.

    Args:
        input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
            and `mn` equals `m` or `n` depending on :attr:`left`.
        tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
        other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
        left (bool): controls the order of multiplication.
        transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.

    Keyword args:
        out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.

    .. _Representation of Orthogonal or Unitary Matrices:
        https://www.netlib.org/lapack/lug/node128.html
    """
    ...
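# Usage sketch: apply Q from a geqrf factorization to another matrix without
# forming Q explicitly; for a square input this should match materializing Q
# with torch.orgqr first.
#
#   >>> A = torch.randn(4, 4)
#   >>> h, tau = torch.geqrf(A)
#   >>> C = torch.randn(4, 2)
#   >>> Q = torch.orgqr(h, tau)
#   >>> torch.allclose(torch.ormqr(h, tau, C), Q @ C, atol=1e-5)
#   True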
def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    outer(input, vec2, *, out=None) -> Tensor

    Outer product of :attr:`input` and :attr:`vec2`.
    If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
    size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.

    .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

    Args:
        input (Tensor): 1-D input vector
        vec2 (Tensor): 1-D input vector

    Keyword args:
        out (Tensor, optional): optional output matrix

    Example::

        >>> v1 = torch.arange(1., 5.)
        >>> v2 = torch.arange(1., 4.)
        >>> torch.outer(v1, v2)
        tensor([[ 1., 2., 3.],
                [ 2., 4., 6.],
                [ 3., 6., 9.],
                [ 4., 8., 12.]])
    """
    ...
def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ...
def pdist(input: Tensor, p: _float = 2) -> Tensor: ...
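# Usage sketch: pairwise_distance computes the p-norm distance between
# corresponding rows of x1 and x2, while pdist returns the condensed distance
# vector over all row pairs of a single 2-D input.
#
#   >>> a = torch.tensor([[0., 0.], [3., 4.]])
#   >>> torch.pdist(a)                       # distance between the two rows
#   tensor([5.])
#   >>> d = torch.pairwise_distance(a, a + 1.0)  # rowwise, sqrt(2) each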
def permute(input: Tensor, dims: _size) -> Tensor:
    r"""
    permute(input, dims) -> Tensor

    Returns a view of the original tensor :attr:`input` with its dimensions permuted.

    Args:
        input (Tensor): the input tensor.
        dims (tuple of int): The desired ordering of dimensions

    Example::

        >>> x = torch.randn(2, 3, 5)
        >>> x.size()
        torch.Size([2, 3, 5])
        >>> torch.permute(x, (2, 0, 1)).size()
        torch.Size([5, 2, 3])
    """
    ...
def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.permute`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
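# Usage sketch: unlike torch.permute, permute_copy returns a fresh tensor, so
# writing to the result must not modify the source.
#
#   >>> x = torch.zeros(2, 3)
#   >>> y = torch.permute_copy(x, (1, 0))
#   >>> y[0, 0] = 1.0
#   >>> x[0, 0].item()   # unchanged, since y does not alias x
#   0.0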
def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor:
    r"""
    pinverse(input, rcond=1e-15) -> Tensor

    Alias for :func:`torch.linalg.pinv`.
    """
    ...
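# Usage sketch: for a full-column-rank tall matrix, the pseudoinverse acts as
# a left inverse.
#
#   >>> a = torch.randn(5, 3)
#   >>> torch.allclose(torch.pinverse(a) @ a, torch.eye(3), atol=1e-5)
#   True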
def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ...
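# Usage sketch: pixel_shuffle moves channel blocks into space,
# (N, C*r^2, H, W) -> (N, C, H*r, W*r), and pixel_unshuffle inverts it.
#
#   >>> x = torch.randn(1, 8, 2, 2)
#   >>> y = torch.pixel_shuffle(x, 2)   # upscale_factor r = 2
#   >>> y.shape
#   torch.Size([1, 2, 4, 4])
#   >>> torch.equal(torch.pixel_unshuffle(y, 2), x)
#   True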
def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor:
    r"""
    poisson(input, generator=None) -> Tensor

    Returns a tensor of the same size as :attr:`input` with each element
    sampled from a Poisson distribution with rate parameter given by the corresponding
    element in :attr:`input` i.e.,

    .. math::
        \text{out}_i \sim \text{Poisson}(\text{input}_i)

    :attr:`input` must be non-negative.

    Args:
        input (Tensor): the input tensor containing the rates of the Poisson distribution

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling

    Example::

        >>> rates = torch.rand(4, 4) * 5  # rate parameter between 0 and 5
        >>> torch.poisson(rates)
        tensor([[9., 1., 3., 5.],
                [8., 6., 6., 0.],
                [0., 4., 5., 3.],
                [2., 1., 4., 2.]])
    """
    ...
def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
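# Usage sketch: this stub is the low-level overload (reduction is an internal
# integer enum); the public wrapper is torch.nn.functional.poisson_nll_loss.
# With log_input=True the per-element loss is exp(input) - target * input.
#
#   >>> import torch.nn.functional as F
#   >>> inp, tgt = torch.randn(3), torch.tensor([1., 0., 2.])
#   >>> loss = F.poisson_nll_loss(inp, tgt, log_input=True, full=False, reduction='none')
#   >>> torch.allclose(loss, torch.exp(inp) - tgt * inp)
#   True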
def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    polar(abs, angle, *, out=None) -> Tensor

    Constructs a complex tensor whose elements are Cartesian coordinates
    corresponding to the polar coordinates with absolute value :attr:`abs` and angle
    :attr:`angle`.

    .. math::
        \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j

    .. note::
        `torch.polar` is similar to
        `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
        and does not compute the polar decomposition
        of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
        The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
        infinite.

    Args:
        abs (Tensor): The absolute value of the complex tensor. Must be float or double.
        angle (Tensor): The angle of the complex tensor. Must be the same dtype as
            :attr:`abs`.

    Keyword args:
        out (Tensor): If the inputs are ``torch.float32``, must be
            ``torch.complex64``. If the inputs are ``torch.float64``, must be
            ``torch.complex128``.

    Example::

        >>> import numpy as np
        >>> abs = torch.tensor([1, 2], dtype=torch.float64)
        >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
        >>> z = torch.polar(abs, angle)
        >>> z
        tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
    """
    ...
def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    polygamma(n, input, *, out=None) -> Tensor

    Alias for :func:`torch.special.polygamma`.
    """
    ...
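# Usage sketch: polygamma(n, x) is the n-th derivative of the digamma
# function; at x = 1 the trigamma value is pi**2 / 6.
#
#   >>> import math
#   >>> torch.polygamma(1, torch.tensor([1.0]))
#   tensor([1.6449])
#   >>> math.pi ** 2 / 6
#   1.6449340668482264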
def positive(input: Tensor) -> Tensor:
    r"""
    positive(input) -> Tensor

    Returns :attr:`input`.
    Throws a runtime error if :attr:`input` is a bool tensor.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> t = torch.randn(5)
        >>> t
        tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
        >>> torch.positive(t)
        tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
    """
    ...
@overload
def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    pow(input, exponent, *, out=None) -> Tensor

    Takes the power of each element in :attr:`input` with :attr:`exponent` and
    returns a tensor with the result.
    :attr:`exponent` can be either a single ``float`` number or a `Tensor`
    with the same number of elements as :attr:`input`.

    When :attr:`exponent` is a scalar value, the operation applied is:

    .. math::
        \text{out}_i = x_i ^ \text{exponent}

    When :attr:`exponent` is a tensor, the operation applied is:

    .. math::
        \text{out}_i = x_i ^ {\text{exponent}_i}

    When :attr:`exponent` is a tensor, the shapes of :attr:`input`
    and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.

    Args:
        input (Tensor): the input tensor.
        exponent (float or tensor): the exponent value

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
        >>> torch.pow(a, 2)
        tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
        >>> exp = torch.arange(1., 5.)
        >>> a = torch.arange(1., 5.)
        >>> a
        tensor([ 1., 2., 3., 4.])
        >>> exp
        tensor([ 1., 2., 3., 4.])
        >>> torch.pow(a, exp)
        tensor([ 1., 4., 27., 256.])

    .. function:: pow(self, exponent, *, out=None) -> Tensor
       :noindex:

    :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
    The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.

    The operation applied is:

    .. math::
        \text{out}_i = \text{self} ^ {\text{exponent}_i}

    Args:
        self (float): the scalar base value for the power operation
        exponent (Tensor): the exponent tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> exp = torch.arange(1., 5.)
        >>> base = 2
        >>> torch.pow(base, exp)
        tensor([ 2., 4., 8., 16.])
    """
    ...
@overload
def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    pow(input, exponent, *, out=None) -> Tensor

    Takes the power of each element in :attr:`input` with :attr:`exponent` and
    returns a tensor with the result.
    :attr:`exponent` can be either a single ``float`` number or a `Tensor`
    with the same number of elements as :attr:`input`.

    When :attr:`exponent` is a scalar value, the operation applied is:

    .. math::
        \text{out}_i = x_i ^ \text{exponent}

    When :attr:`exponent` is a tensor, the operation applied is:

    .. math::
        \text{out}_i = x_i ^ {\text{exponent}_i}

    When :attr:`exponent` is a tensor, the shapes of :attr:`input`
    and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.

    Args:
        input (Tensor): the input tensor.
        exponent (float or tensor): the exponent value

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
        >>> torch.pow(a, 2)
        tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
        >>> exp = torch.arange(1., 5.)
        >>> a = torch.arange(1., 5.)
        >>> a
        tensor([ 1., 2., 3., 4.])
        >>> exp
        tensor([ 1., 2., 3., 4.])
        >>> torch.pow(a, exp)
        tensor([ 1., 4., 27., 256.])

    .. function:: pow(self, exponent, *, out=None) -> Tensor
       :noindex:

    :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
    The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.

    The operation applied is:

    .. math::
        \text{out}_i = \text{self} ^ {\text{exponent}_i}

    Args:
        self (float): the scalar base value for the power operation
        exponent (Tensor): the exponent tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> exp = torch.arange(1., 5.)
        >>> base = 2
        >>> torch.pow(base, exp)
        tensor([ 2., 4., 8., 16.])
    """
    ...
@overload
def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    pow(input, exponent, *, out=None) -> Tensor

    Takes the power of each element in :attr:`input` with :attr:`exponent` and
    returns a tensor with the result.
    :attr:`exponent` can be either a single ``float`` number or a `Tensor`
    with the same number of elements as :attr:`input`.

    When :attr:`exponent` is a scalar value, the operation applied is:

    .. math::
        \text{out}_i = x_i ^ \text{exponent}

    When :attr:`exponent` is a tensor, the operation applied is:

    .. math::
        \text{out}_i = x_i ^ {\text{exponent}_i}

    When :attr:`exponent` is a tensor, the shapes of :attr:`input`
    and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.

    Args:
        input (Tensor): the input tensor.
        exponent (float or tensor): the exponent value

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
        >>> torch.pow(a, 2)
        tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
        >>> exp = torch.arange(1., 5.)
        >>> a = torch.arange(1., 5.)
        >>> a
        tensor([ 1., 2., 3., 4.])
        >>> exp
        tensor([ 1., 2., 3., 4.])
        >>> torch.pow(a, exp)
        tensor([ 1., 4., 27., 256.])

    .. function:: pow(self, exponent, *, out=None) -> Tensor
       :noindex:

    :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
    The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.

    The operation applied is:

    .. math::
        \text{out}_i = \text{self} ^ {\text{exponent}_i}

    Args:
        self (float): the scalar base value for the power operation
        exponent (Tensor): the exponent tensor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> exp = torch.arange(1., 5.)
        >>> base = 2
        >>> torch.pow(base, exp)
        tensor([ 2., 4., 8., 16.])
    """
    ...
def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
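# Usage sketch: prelu computes max(0, x) + weight * min(0, x); weight is a
# one-element tensor or has one value per channel.
#
#   >>> x = torch.tensor([-2.0, 3.0])
#   >>> torch.prelu(x, torch.tensor(0.25))
#   tensor([-0.5000,  3.0000])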
@overload
def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
    r"""
    prod(input, *, dtype=None) -> Tensor

    Returns the product of all elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[-0.8020, 0.5428, -1.5854]])
        >>> torch.prod(a)
        tensor(0.6902)

    .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the product of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensor having 1 fewer dimension than :attr:`input`.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 2)
        >>> a
        tensor([[ 0.5261, -0.3837],
                [ 1.1857, -0.2498],
                [-1.1646, 0.0705],
                [ 1.1131, -1.0629]])
        >>> torch.prod(a, 1)
        tensor([-0.2018, -0.2962, -0.0821, -1.1831])
    """
    ...
@overload
def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    prod(input, *, dtype=None) -> Tensor

    Returns the product of all elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[-0.8020, 0.5428, -1.5854]])
        >>> torch.prod(a)
        tensor(0.6902)

    .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the product of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensor having 1 fewer dimension than :attr:`input`.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 2)
        >>> a
        tensor([[ 0.5261, -0.3837],
                [ 1.1857, -0.2498],
                [-1.1646, 0.0705],
                [ 1.1131, -1.0629]])
        >>> torch.prod(a, 1)
        tensor([-0.2018, -0.2962, -0.0821, -1.1831])
    """
    ...
@overload
def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    prod(input, *, dtype=None) -> Tensor

    Returns the product of all elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[-0.8020, 0.5428, -1.5854]])
        >>> torch.prod(a)
        tensor(0.6902)

    .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the product of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
    the output tensor having 1 fewer dimension than :attr:`input`.

    Args:
        input (Tensor): the input tensor.
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 2)
        >>> a
        tensor([[ 0.5261, -0.3837],
                [ 1.1857, -0.2498],
                [-1.1646, 0.0705],
                [ 1.1131, -1.0629]])
        >>> torch.prod(a, 1)
        tensor([-0.2018, -0.2962, -0.0821, -1.1831])
    """
    ...
def promote_types(type1: _dtype, type2: _dtype) -> _dtype:
    r"""
    promote_types(type1, type2) -> dtype

    Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
    neither smaller nor of lower kind than either `type1` or `type2`. See type promotion
    :ref:`documentation <type-promotion-doc>` for more information on the type
    promotion logic.

    Args:
        type1 (:class:`torch.dtype`)
        type2 (:class:`torch.dtype`)

    Example::

        >>> torch.promote_types(torch.int32, torch.float32)
        torch.float32
        >>> torch.promote_types(torch.uint8, torch.long)
        torch.long
    """
    ...
def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ...
def q_per_channel_axis(input: Tensor) -> _int: ...
def q_per_channel_scales(input: Tensor) -> Tensor: ...
def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
def q_scale(input: Tensor) -> _float: ...
def q_zero_point(input: Tensor) -> _int: ...
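# Usage sketch: the q_* accessors read quantization parameters back from a
# quantized tensor.
#
#   >>> qx = torch.quantize_per_tensor(torch.rand(4), 0.1, 10, torch.quint8)
#   >>> torch.q_scale(qx), torch.q_zero_point(qx)
#   (0.1, 10)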
def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr:
    r"""
    qr(input, some=True, *, out=None) -> (Tensor, Tensor)

    Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
    and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
    with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
    :math:`R` being an upper triangular matrix or batch of upper triangular matrices.

    If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
    Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.

    .. warning::
        :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
        and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
        replaced with a string parameter :attr:`mode`.

        ``Q, R = torch.qr(A)`` should be replaced with

        .. code:: python

            Q, R = torch.linalg.qr(A)

        ``Q, R = torch.qr(A, some=False)`` should be replaced with

        .. code:: python

            Q, R = torch.linalg.qr(A, mode="complete")

    .. warning::
        If you plan to backpropagate through QR, note that the current backward implementation
        is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
        columns of :attr:`input` are linearly independent.
        This behavior will probably change once QR supports pivoting.

    .. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
              and may produce different (valid) decompositions on different device types
              or different platforms.

    Args:
        input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
            batch dimensions consisting of matrices of dimension :math:`m \times n`.
        some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
            complete QR decomposition. If `k = min(m, n)` then:

            * ``some=True``: returns `(Q, R)` with dimensions (m, k), (k, n) (default)
            * ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)

    Keyword args:
        out (tuple, optional): tuple of `Q` and `R` tensors.
            The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.

    Example::

        >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
        >>> q, r = torch.qr(a)
        >>> q
        tensor([[-0.8571, 0.3943, 0.3314],
                [-0.4286, -0.9029, -0.0343],
                [ 0.2857, -0.1714, 0.9429]])
        >>> r
        tensor([[ -14.0000, -21.0000, 14.0000],
                [ 0.0000, -175.0000, 70.0000],
                [ 0.0000, 0.0000, -35.0000]])
        >>> torch.mm(q, r).round()
        tensor([[ 12., -51., 4.],
                [ 6., 167., -68.],
                [ -4., 24., -41.]])
        >>> torch.mm(q.t(), q).round()
        tensor([[ 1., 0., 0.],
                [ 0., 1., -0.],
                [ 0., -0., 1.]])
        >>> a = torch.randn(3, 4, 5)
        >>> q, r = torch.qr(a, some=False)
        >>> torch.allclose(torch.matmul(q, r), a)
        True
        >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5))
        True
    """
    ...
@overload
def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
    r"""
    quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

    Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.

    To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
    of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
    indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
    :attr:`interpolation` method as follows:

    - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
    - ``lower``: ``a``.
    - ``higher``: ``b``.
    - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
    - ``midpoint``: ``(a + b) / 2``.

    If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
    equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.

    .. note::
        By default :attr:`dim` is ``None``, resulting in the :attr:`input` tensor being flattened before computation.

    Args:
        input (Tensor): the input tensor.
        q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword arguments:
        interpolation (str): interpolation method to use when the desired quantile lies between two data points.
            Can be ``linear``, ``lower``, ``higher``, ``midpoint`` or ``nearest``.
            Default is ``linear``.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(2, 3)
        >>> a
        tensor([[ 0.0795, -1.2117, 0.9765],
                [ 1.1707, 0.6706, 0.4884]])
        >>> q = torch.tensor([0.25, 0.5, 0.75])
        >>> torch.quantile(a, q, dim=1, keepdim=True)
        tensor([[[-0.5661],
                 [ 0.5795]],
                [[ 0.0795],
                 [ 0.6706]],
                [[ 0.5280],
                 [ 0.9206]]])
        >>> torch.quantile(a, q, dim=1, keepdim=True).shape
        torch.Size([3, 2, 1])
        >>> a = torch.arange(4.)
        >>> a
        tensor([0., 1., 2., 3.])
        >>> torch.quantile(a, 0.6, interpolation='linear')
        tensor(1.8000)
        >>> torch.quantile(a, 0.6, interpolation='lower')
        tensor(1.)
        >>> torch.quantile(a, 0.6, interpolation='higher')
        tensor(2.)
        >>> torch.quantile(a, 0.6, interpolation='midpoint')
        tensor(1.5000)
        >>> torch.quantile(a, 0.6, interpolation='nearest')
        tensor(2.)
        >>> torch.quantile(a, 0.4, interpolation='nearest')
        tensor(1.)
    """
    ...
@overload
def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
    r"""
    quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

    Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.

    To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
    of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
    indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
    :attr:`interpolation` method as follows:

    - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
    - ``lower``: ``a``.
    - ``higher``: ``b``.
    - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
    - ``midpoint``: ``(a + b) / 2``.

    If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
    equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.

    .. note::
        By default :attr:`dim` is ``None``, resulting in the :attr:`input` tensor being flattened before computation.

    Args:
        input (Tensor): the input tensor.
        q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
        dim (int): the dimension to reduce.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword arguments:
        interpolation (str): interpolation method to use when the desired quantile lies between two data points.
            Can be ``linear``, ``lower``, ``higher``, ``midpoint`` or ``nearest``.
            Default is ``linear``.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(2, 3)
        >>> a
        tensor([[ 0.0795, -1.2117, 0.9765],
                [ 1.1707, 0.6706, 0.4884]])
        >>> q = torch.tensor([0.25, 0.5, 0.75])
        >>> torch.quantile(a, q, dim=1, keepdim=True)
        tensor([[[-0.5661],
                 [ 0.5795]],
                [[ 0.0795],
                 [ 0.6706]],
                [[ 0.5280],
                 [ 0.9206]]])
        >>> torch.quantile(a, q, dim=1, keepdim=True).shape
        torch.Size([3, 2, 1])
        >>> a = torch.arange(4.)
        >>> a
        tensor([0., 1., 2., 3.])
        >>> torch.quantile(a, 0.6, interpolation='linear')
        tensor(1.8000)
        >>> torch.quantile(a, 0.6, interpolation='lower')
        tensor(1.)
        >>> torch.quantile(a, 0.6, interpolation='higher')
        tensor(2.)
        >>> torch.quantile(a, 0.6, interpolation='midpoint')
        tensor(1.5000)
        >>> torch.quantile(a, 0.6, interpolation='nearest')
        tensor(2.)
        >>> torch.quantile(a, 0.4, interpolation='nearest')
        tensor(1.)
    """
    ...
def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor:
    r"""
    quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor

    Converts a float tensor to a per-channel quantized tensor with given scales and zero points.

    Arguments:
        input (Tensor): float tensor to quantize
        scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
        zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
        axis (int): dimension on which to apply per-channel quantization
        dtype (:class:`torch.dtype`): the desired data type of returned tensor.
            Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

    Returns:
        Tensor: A newly quantized tensor

    Example::

        >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
        >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
        tensor([[-1., 0.],
                [ 1., 2.]], size=(2, 2), dtype=torch.quint8,
               quantization_scheme=torch.per_channel_affine,
               scale=tensor([0.1000, 0.0100], dtype=torch.float64),
               zero_point=tensor([10, 0]), axis=0)
        >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
        tensor([[ 0, 10],
                [100, 200]], dtype=torch.uint8)
    """
    ...
@overload
def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor:
    r"""
    quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor

    Converts a float tensor to a quantized tensor with given scale and zero point.

    Arguments:
        input (Tensor): float tensor or list of tensors to quantize
        scale (float or Tensor): scale to apply in quantization formula
        zero_point (int or Tensor): offset in integer value that maps to float zero
        dtype (:class:`torch.dtype`): the desired data type of returned tensor.
            Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

    Returns:
        Tensor: A newly quantized tensor or list of quantized tensors.

    Example::

        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
        tensor([ 0, 10, 20, 30], dtype=torch.uint8)
        >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
        ...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
        (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
                quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
         tensor([-2., 2.], size=(2,), dtype=torch.quint8,
                quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
    """
    ...
@overload
def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor:
    r"""
    quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor

    Converts a float tensor to a quantized tensor with given scale and zero point.

    Arguments:
        input (Tensor): float tensor or list of tensors to quantize
        scale (float or Tensor): scale to apply in quantization formula
        zero_point (int or Tensor): offset in integer value that maps to float zero
        dtype (:class:`torch.dtype`): the desired data type of returned tensor.
            Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

    Returns:
        Tensor: A newly quantized tensor or list of quantized tensors.

    Example::

        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
        tensor([ 0, 10, 20, 30], dtype=torch.uint8)
        >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
        ...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
        (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
                quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
         tensor([-2., 2.], size=(2,), dtype=torch.quint8,
                quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
    """
    ...
@overload
def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> Tuple[Tensor, ...]:
    r"""
    quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor

    Converts a float tensor to a quantized tensor with given scale and zero point.

    Arguments:
        input (Tensor): float tensor or list of tensors to quantize
        scale (float or Tensor): scale to apply in quantization formula
        zero_point (int or Tensor): offset in integer value that maps to float zero
        dtype (:class:`torch.dtype`): the desired data type of returned tensor.
            Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

    Returns:
        Tensor: A newly quantized tensor or list of quantized tensors.

    Example::

        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
        tensor([ 0, 10, 20, 30], dtype=torch.uint8)
        >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
        ...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
        (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
                quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
         tensor([-2., 2.], size=(2,), dtype=torch.quint8,
                quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
        >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
    """
    ...
def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor:
    r"""
    quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor

    Converts a float tensor to a quantized tensor with scale and zero_point calculated
    dynamically based on the input.

    Arguments:
        input (Tensor): float tensor or list of tensors to quantize
        dtype (:class:`torch.dtype`): the desired data type of returned tensor.
            Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
        reduce_range (bool): a flag to indicate whether to reduce the range of quantized
            data by 1 bit; required to avoid instruction overflow on some hardware

    Returns:
        Tensor: A newly (dynamically) quantized tensor

    Example::

        >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
        >>> print(t)
        tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
               zero_point=85)
        >>> t.int_repr()
        tensor([ 0, 85, 170, 255], dtype=torch.uint8)
    """
    ...
def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor:
    r"""
    quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor

    Applies batch normalization on a 4D (NCHW) quantized tensor.

    .. math::
        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    Arguments:
        input (Tensor): quantized tensor
        weight (Tensor): float tensor that corresponds to the gamma, size C
        bias (Tensor): float tensor that corresponds to the beta, size C
        mean (Tensor): float mean value in batch normalization, size C
        var (Tensor): float tensor for variance, size C
        eps (float): a value added to the denominator for numerical stability.
        output_scale (float): output quantized tensor scale
        output_zero_point (int): output quantized tensor zero_point

    Returns:
        Tensor: A quantized tensor with batch normalization applied.

    Example::

        >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
        >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
        tensor([[[[-0.2000, -0.2000],
                  [ 1.6000, -0.2000]],
                 [[-0.4000, -0.4000],
                  [-0.4000, 0.6000]]],
                [[[-0.2000, -0.2000],
                  [-0.2000, -0.2000]],
                 [[ 0.6000, -0.4000],
                  [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
    """
    ...
def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ...
def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor:
    r"""
    quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor

    Applies a 1D max pooling over an input quantized tensor composed of several input planes.

    Arguments:
        input (Tensor): quantized tensor
        kernel_size (``list of int``): the size of the sliding window
        stride (``list of int``, optional): the stride of the sliding window
        padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
        dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
        ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
            Defaults to False.

    Returns:
        Tensor: A quantized tensor with max_pool1d applied.

    Example::

        >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
        >>> torch.quantized_max_pool1d(qx, [2])
        tensor([[0.0000],
                [1.5000]], size=(2, 1), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
    """
    ...
def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor:
    r"""
    quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor

    Applies a 2D max pooling over an input quantized tensor composed of several input planes.

    Arguments:
        input (Tensor): quantized tensor
        kernel_size (``list of int``): the size of the sliding window
        stride (``list of int``, optional): the stride of the sliding window
        padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
        dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
        ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
            Defaults to False.

    Returns:
        Tensor: A quantized tensor with max_pool2d applied.

    Example::

        >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
        >>> torch.quantized_max_pool2d(qx, [2,2])
        tensor([[[[1.5000]],
                 [[1.5000]]],
                [[[0.0000]],
                 [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
               quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
    """
    ...
def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
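# quantized_max_pool3d ships without a docstring upstream; by analogy with the 1D/2D
# variants above, it applies 3D max pooling over a quantized tensor. A minimal sketch
# (values depend on the random input, so only the output shape is shown):
#
#   >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2, 2), 1.5, 3, torch.quint8)
#   >>> torch.quantized_max_pool3d(qx, [2, 2, 2]).shape
#   torch.Size([2, 2, 1, 1, 1])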
def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
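# quantized_gru_cell, quantized_lstm_cell and quantized_rnn_{relu,tanh}_cell are
# undocumented upstream. Judging from their signatures, they are quantized
# counterparts of the corresponding torch.nn.*Cell operations: the weight matrices
# arrive pre-packed (packed_ih / packed_hh, with per-matrix col_offsets, scale and
# zero_point), and each cell returns the next hidden state (the LSTM variant returns
# a (hidden, cell) pair, matching its Tuple[Tensor, Tensor] annotation). No usage
# sketch is given here, since producing the packed-weight arguments requires
# backend-specific packing helpers not described in this stub.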
def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    rad2deg(input, *, out=None) -> Tensor

    Returns a new tensor with each of the elements of :attr:`input`
    converted from angles in radians to degrees.

    Args:
        input (Tensor): the input tensor.

    Keyword arguments:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
        >>> torch.rad2deg(a)
        tensor([[ 180.0233, -180.0233],
                [ 359.9894, -359.9894],
                [  89.9544,  -89.9544]])
    """
    ...
def rad2deg_(input: Tensor) -> Tensor: ...
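# rad2deg_ carries no docstring upstream; it is the in-place variant of rad2deg,
# converting the elements of `input` from radians to degrees in place and returning
# `input`. A minimal sketch:
#
#   >>> a = torch.tensor([3.1415926536])
#   >>> torch.rad2deg_(a)   # mutates and returns `a`; now approximately 180 degrees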
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
@overload
def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a uniform distribution
    on the interval :math:`[0, 1)`.

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.rand(4)
        tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
        >>> torch.rand(2, 3)
        tensor([[ 0.8237,  0.5781,  0.6879],
                [ 0.3816,  0.7249,  0.0998]])
    """
    ...
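# The eight overloads above describe the single variadic torch.rand API: `size`
# may be passed either as separate ints or as one sequence. A minimal sketch,
# including a seeded generator for reproducible sampling:
#
#   >>> torch.rand(2, 3).shape == torch.rand((2, 3)).shape
#   True
#   >>> g = torch.Generator().manual_seed(0)
#   >>> torch.rand(2, 2, generator=g)   # same values on every run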
def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor with the same size as :attr:`input` that is filled with
    random numbers from a uniform distribution on the interval :math:`[0, 1)`.
    ``torch.rand_like(input)`` is equivalent to
    ``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

    Args:
        input (Tensor): the size of :attr:`input` will determine size of the output tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.
    """
    ...
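# rand_like has no Example block upstream; a minimal sketch of the
# shape/dtype-preserving behaviour documented above:
#
#   >>> x = torch.empty(3, 4, dtype=torch.float64)
#   >>> y = torch.rand_like(x)
#   >>> y.shape, y.dtype
#   (torch.Size([3, 4]), torch.float64)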
@overload
def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with random integers generated uniformly
    between :attr:`low` (inclusive) and :attr:`high` (exclusive).

    The shape of the tensor is defined by the variable argument :attr:`size`.

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.
        size (tuple): a tuple defining the shape of the output tensor.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, this function returns a tensor with dtype ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.randint(3, 5, (3,))
        tensor([4, 3, 4])
        >>> torch.randint(10, (2, 2))
        tensor([[0, 2],
                [5, 5]])
        >>> torch.randint(3, 10, (2, 2))
        tensor([[4, 5],
                [6, 7]])
    """
    ...
@overload
def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with random integers generated uniformly
    between :attr:`low` (inclusive) and :attr:`high` (exclusive).

    The shape of the tensor is defined by the variable argument :attr:`size`.

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.
        size (tuple): a tuple defining the shape of the output tensor.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, this function returns a tensor with dtype ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.randint(3, 5, (3,))
        tensor([4, 3, 4])
        >>> torch.randint(10, (2, 2))
        tensor([[0, 2],
                [5, 5]])
        >>> torch.randint(3, 10, (2, 2))
        tensor([[4, 5],
                [6, 7]])
    """
    ...
@overload
def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with random integers generated uniformly
    between :attr:`low` (inclusive) and :attr:`high` (exclusive).

    The shape of the tensor is defined by the variable argument :attr:`size`.

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.
        size (tuple): a tuple defining the shape of the output tensor.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, this function returns a tensor with dtype ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.randint(3, 5, (3,))
        tensor([4, 3, 4])
        >>> torch.randint(10, (2, 2))
        tensor([[0, 2],
                [5, 5]])
        >>> torch.randint(3, 10, (2, 2))
        tensor([[4, 5],
                [6, 7]])
    """
    ...
@overload
def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with random integers generated uniformly
    between :attr:`low` (inclusive) and :attr:`high` (exclusive).

    The shape of the tensor is defined by the variable argument :attr:`size`.

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.
        size (tuple): a tuple defining the shape of the output tensor.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, this function returns a tensor with dtype ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.randint(3, 5, (3,))
        tensor([4, 3, 4])
        >>> torch.randint(10, (2, 2))
        tensor([[0, 2],
                [5, 5]])
        >>> torch.randint(3, 10, (2, 2))
        tensor([[4, 5],
                [6, 7]])
    """
    ...
@overload
def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with random integers generated uniformly
    between :attr:`low` (inclusive) and :attr:`high` (exclusive).

    The shape of the tensor is defined by the variable argument :attr:`size`.

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.
        size (tuple): a tuple defining the shape of the output tensor.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, this function returns a tensor with dtype ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.randint(3, 5, (3,))
        tensor([4, 3, 4])
        >>> torch.randint(10, (2, 2))
        tensor([[0, 2],
                [5, 5]])
        >>> torch.randint(3, 10, (2, 2))
        tensor([[4, 5],
                [6, 7]])
    """
    ...
@overload
def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with random integers generated uniformly
    between :attr:`low` (inclusive) and :attr:`high` (exclusive).

    The shape of the tensor is defined by the variable argument :attr:`size`.

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.
        size (tuple): a tuple defining the shape of the output tensor.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, this function returns a tensor with dtype ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.randint(3, 5, (3,))
        tensor([4, 3, 4])
        >>> torch.randint(10, (2, 2))
        tensor([[0, 2],
                [5, 5]])
        >>> torch.randint(3, 10, (2, 2))
        tensor([[4, 5],
                [6, 7]])
    """
    ...
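# As with rand above, the six overloads describe one API: `low` defaults to 0
# when omitted, and `high` is always exclusive. A minimal sketch with a seeded
# generator for reproducibility:
#
#   >>> g = torch.Generator().manual_seed(0)
#   >>> torch.randint(0, 10, (2, 2), generator=g)   # values drawn from [0, 10)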
@overload
def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor with the same shape as Tensor :attr:`input` filled with
    random integers generated uniformly between :attr:`low` (inclusive) and
    :attr:`high` (exclusive).

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        input (Tensor): the size of :attr:`input` will determine size of the output tensor.
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.
    """
    ...
@overload
def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor with the same shape as Tensor :attr:`input` filled with
    random integers generated uniformly between :attr:`low` (inclusive) and
    :attr:`high` (exclusive).

    .. note::
        With the global dtype default (``torch.float32``), this function returns
        a tensor with dtype ``torch.int64``.

    Args:
        input (Tensor): the size of :attr:`input` will determine size of the output tensor.
        low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
        high (int): One above the highest integer to be drawn from the distribution.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.
    """
    ...
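# randint_like carries no Example block upstream; a minimal sketch of both
# calling conventions documented above:
#
#   >>> x = torch.zeros(2, 3, dtype=torch.int64)
#   >>> torch.randint_like(x, 5)      # values in [0, 5), same shape/dtype as x
#   >>> torch.randint_like(x, 2, 7)   # values in [2, 7)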
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling.
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of the returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of the returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
@overload
def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a tensor filled with random numbers from a normal distribution
    with mean `0` and variance `1` (also called the standard normal
    distribution).

    .. math::
        \text{out}_{i} \sim \mathcal{N}(0, 1)

    For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
    unit variance as

    .. math::
        \text{out}_{i} \sim \mathcal{CN}(0, 1)

    This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
    :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as

    .. math::
        \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
        \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})

    The shape of the tensor is defined by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling.
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of the returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of the returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randn(4)
        tensor([-2.1436,  0.9966,  2.3426, -0.6366])
        >>> torch.randn(2, 3)
        tensor([[ 1.5954,  2.8929, -1.0923],
                [ 1.1719, -0.4709, -0.1996]])

    .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
    """
    ...
def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor with the same size as :attr:`input` that is filled with
    random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
    sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
    ``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

    Args:
        input (Tensor): the size of :attr:`input` will determine the size of the output tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of the returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of the returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of the returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            the returned Tensor. Default: ``torch.preserve_format``.
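
    The example below is illustrative only: the returned values are random, so it
    checks just the (deterministic) shape of the result.

    Example::

        >>> x = torch.empty(2, 3)
        >>> torch.randn_like(x).shape
        torch.Size([2, 3])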
    """
    ...
@overload
def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a random permutation of integers from ``0`` to ``n - 1``.

    Args:
        n (int): the upper bound (exclusive)

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling.
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
            Default: ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of the returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of the returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randperm(4)
        tensor([2, 1, 0, 3])
    """
    ...
@overload
def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Returns a random permutation of integers from ``0`` to ``n - 1``.

    Args:
        n (int): the upper bound (exclusive)

    Keyword args:
        generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling.
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
            Default: ``torch.int64``.
        layout (:class:`torch.layout`, optional): the desired layout of the returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of the returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.randperm(4)
        tensor([2, 1, 0, 3])
    """
    ...
def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
    with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
    the gap between two values in the tensor.

    .. math::
        \text{out}_{i+1} = \text{out}_i + \text{step}.

    .. warning::
        This function is deprecated and will be removed in a future release because its behavior is inconsistent with
        Python's ``range`` builtin. Instead, use :func:`torch.arange`, which produces values in ``[start, end)``.

    Args:
        start (float): the starting value for the set of points. Default: ``0``.
        end (float): the ending value for the set of points
        step (float): the gap between each pair of adjacent points. Default: ``1``.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor.
            Default: if ``None``, the data type is inferred from the other input
            arguments: if any of `start`, `end`, or `step` are floating-point, the
            `dtype` is inferred to be the default dtype, see
            :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
            be `torch.int64`.
        layout (:class:`torch.layout`, optional): the desired layout of the returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of the returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::

        >>> torch.range(1, 4)
        tensor([ 1.,  2.,  3.,  4.])
        >>> torch.range(1, 4, 0.5)
        tensor([ 1.0000,  1.5000,  2.0000,  2.5000,  3.0000,  3.5000,  4.0000])
    """
    ...
def ravel(input: Tensor) -> Tensor:
    r"""
    ravel(input) -> Tensor

    Return a contiguous flattened tensor. A copy is made only if needed.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> t = torch.tensor([[[1, 2],
        ...                    [3, 4]],
        ...                   [[5, 6],
        ...                    [7, 8]]])
        >>> torch.ravel(t)
        tensor([1, 2, 3, 4, 5, 6, 7, 8])
    """
    ...
def real(input: Tensor) -> Tensor:
    r"""
    real(input) -> Tensor

    Returns a new tensor containing the real values of the :attr:`input` tensor.
    The returned tensor and :attr:`input` share the same underlying storage.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> x = torch.randn(4, dtype=torch.cfloat)
        >>> x
        tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
        >>> x.real
        tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
    """
    ...
def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    reciprocal(input, *, out=None) -> Tensor

    Returns a new tensor with the reciprocal of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \frac{1}{\text{input}_{i}}

    .. note::
        Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
        inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
        the default scalar type.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.4595, -2.1219, -1.4314,  0.7298])
        >>> torch.reciprocal(a)
        tensor([-2.1763, -0.4713, -0.6986,  1.3702])
    """
    ...
def reciprocal_(input: Tensor) -> Tensor: ...
def relu(input: Tensor) -> Tensor: ...
def relu_(input: Tensor) -> Tensor: ...
@overload
def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    remainder(input, other, *, out=None) -> Tensor

    Computes
    `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
    entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
    is less than that of :attr:`other`.

    It may also be defined in terms of :func:`torch.div` as

    .. code:: python

        torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

    .. note::
        Complex inputs are not supported. In some cases, it is not mathematically
        possible to satisfy the definition of a modulo operation with complex numbers.
        See :func:`torch.fmod` for how division by zero is handled.

    .. seealso::
        :func:`torch.fmod`, which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_
        and is defined in terms of division that rounds towards zero.

    Args:
        input (Tensor or Scalar): the dividend
        other (Tensor or Scalar): the divisor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
        tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
        >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
        tensor([-0.5000, -1.0000,  0.0000, -0.5000, -1.0000])
    """
    ...
@overload
def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    remainder(input, other, *, out=None) -> Tensor

    Computes
    `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
    entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
    is less than that of :attr:`other`.

    It may also be defined in terms of :func:`torch.div` as

    .. code:: python

        torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

    .. note::
        Complex inputs are not supported. In some cases, it is not mathematically
        possible to satisfy the definition of a modulo operation with complex numbers.
        See :func:`torch.fmod` for how division by zero is handled.

    .. seealso::
        :func:`torch.fmod`, which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_
        and is defined in terms of division that rounds towards zero.

    Args:
        input (Tensor or Scalar): the dividend
        other (Tensor or Scalar): the divisor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
        tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
        >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
        tensor([-0.5000, -1.0000,  0.0000, -0.5000, -1.0000])
    """
    ...
@overload
def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    remainder(input, other, *, out=None) -> Tensor

    Computes
    `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
    entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
    is less than that of :attr:`other`.

    It may also be defined in terms of :func:`torch.div` as

    .. code:: python

        torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

    .. note::
        Complex inputs are not supported. In some cases, it is not mathematically
        possible to satisfy the definition of a modulo operation with complex numbers.
        See :func:`torch.fmod` for how division by zero is handled.

    .. seealso::
        :func:`torch.fmod`, which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_
        and is defined in terms of division that rounds towards zero.

    Args:
        input (Tensor or Scalar): the dividend
        other (Tensor or Scalar): the divisor

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
        tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
        >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
        tensor([-0.5000, -1.0000,  0.0000, -0.5000, -1.0000])
    """
    ...
def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    renorm(input, p, dim, maxnorm, *, out=None) -> Tensor

    Returns a tensor where each sub-tensor of :attr:`input` along dimension
    :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
    than the value :attr:`maxnorm`.

    .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged.

    Args:
        input (Tensor): the input tensor.
        p (float): the power for the norm computation
        dim (int): the dimension to slice over to get the sub-tensors
        maxnorm (float): the maximum norm to keep each sub-tensor under

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.ones(3, 3)
        >>> x[1].fill_(2)
        tensor([ 2.,  2.,  2.])
        >>> x[2].fill_(3)
        tensor([ 3.,  3.,  3.])
        >>> x
        tensor([[ 1.,  1.,  1.],
                [ 2.,  2.,  2.],
                [ 3.,  3.,  3.]])
        >>> torch.renorm(x, 1, 0, 5)
        tensor([[ 1.0000,  1.0000,  1.0000],
                [ 1.6667,  1.6667,  1.6667],
                [ 1.6667,  1.6667,  1.6667]])
    """
    ...
@overload
def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor

    Repeat elements of a tensor.

    .. warning::
        This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.

    Args:
        input (Tensor): the input tensor.
        repeats (Tensor or int): The number of repetitions for each element.
            repeats is broadcasted to fit the shape of the given axis.
        dim (int, optional): The dimension along which to repeat values.
            By default, use the flattened input array, and return a flat output
            array.

    Keyword args:
        output_size (int, optional): Total output size for the given axis
            (e.g. sum of repeats). If given, it avoids the stream synchronization
            otherwise needed to calculate the output shape of the tensor.

    Returns:
        Tensor: Repeated tensor which has the same shape as input, except along the given axis.

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> x.repeat_interleave(2)
        tensor([1, 1, 2, 2, 3, 3])
        >>> y = torch.tensor([[1, 2], [3, 4]])
        >>> torch.repeat_interleave(y, 2)
        tensor([1, 1, 2, 2, 3, 3, 4, 4])
        >>> torch.repeat_interleave(y, 3, dim=1)
        tensor([[1, 1, 1, 2, 2, 2],
                [3, 3, 3, 4, 4, 4]])
        >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
        tensor([[1, 2],
                [3, 4],
                [3, 4]])
        >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
        tensor([[1, 2],
                [3, 4],
                [3, 4]])

    If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
    `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
    `1` appears `n2` times, `2` appears `n3` times, etc.

    .. function:: repeat_interleave(repeats, *) -> Tensor
       :noindex:

    Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.

    Args:
        repeats (Tensor): The number of repetitions for each element.

    Returns:
        Tensor: Repeated tensor of size `sum(repeats)`.

    Example::

        >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
        tensor([0, 1, 1, 2, 2, 2])
    """
    ...
@overload
def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor

    Repeat elements of a tensor.

    .. warning::
        This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.

    Args:
        input (Tensor): the input tensor.
        repeats (Tensor or int): The number of repetitions for each element.
            repeats is broadcasted to fit the shape of the given axis.
        dim (int, optional): The dimension along which to repeat values.
            By default, use the flattened input array, and return a flat output
            array.

    Keyword args:
        output_size (int, optional): Total output size for the given axis
            (e.g. sum of repeats). If given, it avoids the stream synchronization
            otherwise needed to calculate the output shape of the tensor.

    Returns:
        Tensor: Repeated tensor which has the same shape as input, except along the given axis.

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> x.repeat_interleave(2)
        tensor([1, 1, 2, 2, 3, 3])
        >>> y = torch.tensor([[1, 2], [3, 4]])
        >>> torch.repeat_interleave(y, 2)
        tensor([1, 1, 2, 2, 3, 3, 4, 4])
        >>> torch.repeat_interleave(y, 3, dim=1)
        tensor([[1, 1, 1, 2, 2, 2],
                [3, 3, 3, 4, 4, 4]])
        >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
        tensor([[1, 2],
                [3, 4],
                [3, 4]])
        >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
        tensor([[1, 2],
                [3, 4],
                [3, 4]])

    If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
    `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
    `1` appears `n2` times, `2` appears `n3` times, etc.

    .. function:: repeat_interleave(repeats, *) -> Tensor
       :noindex:

    Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.

    Args:
        repeats (Tensor): The number of repetitions for each element.

    Returns:
        Tensor: Repeated tensor of size `sum(repeats)`.

    Example::

        >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
        tensor([0, 1, 1, 2, 2, 2])
    """
    ...
@overload
def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor

    Repeat elements of a tensor.

    .. warning::
        This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.

    Args:
        input (Tensor): the input tensor.
        repeats (Tensor or int): The number of repetitions for each element.
            repeats is broadcasted to fit the shape of the given axis.
        dim (int, optional): The dimension along which to repeat values.
            By default, use the flattened input array, and return a flat output
            array.

    Keyword args:
        output_size (int, optional): Total output size for the given axis
            (e.g. sum of repeats). If given, it avoids the stream synchronization
            otherwise needed to calculate the output shape of the tensor.

    Returns:
        Tensor: Repeated tensor which has the same shape as input, except along the given axis.

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> x.repeat_interleave(2)
        tensor([1, 1, 2, 2, 3, 3])
        >>> y = torch.tensor([[1, 2], [3, 4]])
        >>> torch.repeat_interleave(y, 2)
        tensor([1, 1, 2, 2, 3, 3, 4, 4])
        >>> torch.repeat_interleave(y, 3, dim=1)
        tensor([[1, 1, 1, 2, 2, 2],
                [3, 3, 3, 4, 4, 4]])
        >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
        tensor([[1, 2],
                [3, 4],
                [3, 4]])
        >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
        tensor([[1, 2],
                [3, 4],
                [3, 4]])

    If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
    `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
    `1` appears `n2` times, `2` appears `n3` times, etc.

    .. function:: repeat_interleave(repeats, *) -> Tensor
       :noindex:

    Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.

    Args:
        repeats (Tensor): The number of repetitions for each element.

    Returns:
        Tensor: Repeated tensor of size `sum(repeats)`.

    Example::

        >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
        tensor([0, 1, 1, 2, 2, 2])
    """
    ...
def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor:
    r"""
    reshape(input, shape) -> Tensor

    Returns a tensor with the same data and number of elements as :attr:`input`,
    but with the specified shape. When possible, the returned tensor will be a view
    of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
    with compatible strides can be reshaped without copying, but you should not
    depend on the copying vs. viewing behavior.

    See :meth:`torch.Tensor.view` on when it is possible to return a view.

    A single dimension may be -1, in which case it's inferred from the remaining
    dimensions and the number of elements in :attr:`input`.

    Args:
        input (Tensor): the tensor to be reshaped
        shape (tuple of int): the new shape

    Example::

        >>> a = torch.arange(4.)
        >>> torch.reshape(a, (2, 2))
        tensor([[ 0.,  1.],
                [ 2.,  3.]])
        >>> b = torch.tensor([[0, 1], [2, 3]])
        >>> torch.reshape(b, (-1,))
        tensor([ 0,  1,  2,  3])
    """
    ...
def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ...
def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ...
def resolve_conj(input: Tensor) -> Tensor:
    r"""
    resolve_conj(input) -> Tensor

    Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
    else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
        >>> y = x.conj()
        >>> y.is_conj()
        True
        >>> z = y.resolve_conj()
        >>> z
        tensor([-1 - 1j, -2 - 2j, 3 + 3j])
        >>> z.is_conj()
        False
    """
    ...
def resolve_neg(input: Tensor) -> Tensor:
    r"""
    resolve_neg(input) -> Tensor

    Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
    else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
        >>> y = x.conj()
        >>> z = y.imag
        >>> z.is_neg()
        True
        >>> out = z.resolve_neg()
        >>> out
        tensor([-1., -2., 3.])
        >>> out.is_neg()
        False
    """
    ...
@overload
def result_type(tensor: Tensor, other: Tensor) -> _dtype:
    r"""
    result_type(tensor1, tensor2) -> dtype

    Returns the :class:`torch.dtype` that would result from performing an arithmetic
    operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
    for more information on the type promotion logic.

    Args:
        tensor1 (Tensor or Number): an input tensor or number
        tensor2 (Tensor or Number): an input tensor or number

    Example::

        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
        torch.float32
        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
        torch.uint8
    """
    ...
@overload
def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype:
    r"""
    result_type(tensor1, tensor2) -> dtype

    Returns the :class:`torch.dtype` that would result from performing an arithmetic
    operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
    for more information on the type promotion logic.

    Args:
        tensor1 (Tensor or Number): an input tensor or number
        tensor2 (Tensor or Number): an input tensor or number

    Example::

        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
        torch.float32
        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
        torch.uint8
    """
    ...
@overload
def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype:
    r"""
    result_type(tensor1, tensor2) -> dtype

    Returns the :class:`torch.dtype` that would result from performing an arithmetic
    operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
    for more information on the type promotion logic.

    Args:
        tensor1 (Tensor or Number): an input tensor or number
        tensor2 (Tensor or Number): an input tensor or number

    Example::

        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
        torch.float32
        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
        torch.uint8
    """
    ...
@overload
def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype:
    r"""
    result_type(tensor1, tensor2) -> dtype

    Returns the :class:`torch.dtype` that would result from performing an arithmetic
    operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
    for more information on the type promotion logic.

    Args:
        tensor1 (Tensor or Number): an input tensor or number
        tensor2 (Tensor or Number): an input tensor or number

    Example::

        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
        torch.float32
        >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
        torch.uint8
    """
    ...
def rms_norm(input: Tensor, normalized_shape: _size, weight: Optional[Tensor] = None, eps: Optional[_float] = None) -> Tensor: ...
@overload
def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
@overload
def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor:
    r"""
    roll(input, shifts, dims=None) -> Tensor

    Roll the tensor :attr:`input` along the given dimension(s). Elements that are
    shifted beyond the last position are re-introduced at the first position. If
    :attr:`dims` is `None`, the tensor will be flattened before rolling and then
    restored to the original shape.

    Args:
        input (Tensor): the input tensor.
        shifts (int or tuple of ints): The number of places by which the elements
            of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
            the same size, and each dimension will be rolled by the corresponding
            value.
        dims (int or tuple of ints): Axis or axes along which to roll.

    Example::

        >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
        >>> x
        tensor([[1, 2],
                [3, 4],
                [5, 6],
                [7, 8]])
        >>> torch.roll(x, 1)
        tensor([[8, 1],
                [2, 3],
                [4, 5],
                [6, 7]])
        >>> torch.roll(x, 1, 0)
        tensor([[7, 8],
                [1, 2],
                [3, 4],
                [5, 6]])
        >>> torch.roll(x, -1, 0)
        tensor([[3, 4],
                [5, 6],
                [7, 8],
                [1, 2]])
        >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
        tensor([[6, 5],
                [8, 7],
                [2, 1],
                [4, 3]])
    """
    ...
def rot90(input: Tensor, k: _int = 1, dims: _size = (0, 1)) -> Tensor:
    r"""
    rot90(input, k=1, dims=[0, 1]) -> Tensor

    Rotate an n-D tensor by 90 degrees in the plane specified by the :attr:`dims` axes.
    Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.

    Args:
        input (Tensor): the input tensor.
        k (int): number of times to rotate. Default value is 1.
        dims (a list or tuple): axes to rotate. Default value is [0, 1].

    Example::

        >>> x = torch.arange(4).view(2, 2)
        >>> x
        tensor([[0, 1],
                [2, 3]])
        >>> torch.rot90(x, 1, [0, 1])
        tensor([[1, 3],
                [0, 2]])

        >>> x = torch.arange(8).view(2, 2, 2)
        >>> x
        tensor([[[0, 1],
                 [2, 3]],

                [[4, 5],
                 [6, 7]]])
        >>> torch.rot90(x, 1, [1, 2])
        tensor([[[1, 3],
                 [0, 2]],

                [[5, 7],
                 [4, 6]]])
    """
    ...
@overload
def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    round(input, *, decimals=0, out=None) -> Tensor

    Rounds elements of :attr:`input` to the nearest integer.

    For integer inputs, follows the array-api convention of returning a
    copy of the input tensor. The output has the same dtype as the input.

    .. note::
        This function implements the "round half to even" rule to
        break ties when a number is equidistant from two
        integers (e.g. `round(2.5)` is 2).

        When the :attr:`decimals` argument is specified, the
        algorithm used is similar to NumPy's `around`. This
        algorithm is fast but inexact and it can easily
        overflow for low-precision dtypes.
        E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.

    .. seealso::
        :func:`torch.ceil`, which rounds up.
        :func:`torch.floor`, which rounds down.
        :func:`torch.trunc`, which rounds towards zero.

    Args:
        input (Tensor): the input tensor.
        decimals (int): Number of decimal places to round to (default: 0).
            If decimals is negative, it specifies the number of positions
            to the left of the decimal point.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
        tensor([ 5., -2.,  9., -8.])

        >>> # Values equidistant from two integers are rounded towards
        >>> # the nearest even value (zero is treated as even)
        >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
        tensor([-0., 0., 2., 2.])

        >>> # A positive decimals argument rounds to that decimal place
        >>> torch.round(torch.tensor([0.1234567]), decimals=3)
        tensor([0.1230])

        >>> # A negative decimals argument rounds to the left of the decimal
        >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
        tensor([1000.])
    """
    ...
@overload
def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor:
    r"""
    round(input, *, decimals=0, out=None) -> Tensor

    Rounds elements of :attr:`input` to the nearest integer.

    For integer inputs, follows the array-api convention of returning a
    copy of the input tensor. The output has the same dtype as the input.

    .. note::
        This function implements the "round half to even" rule to
        break ties when a number is equidistant from two
        integers (e.g. `round(2.5)` is 2).

        When the :attr:`decimals` argument is specified, the
        algorithm used is similar to NumPy's `around`. This
        algorithm is fast but inexact and it can easily
        overflow for low-precision dtypes.
        E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.

    .. seealso::
        :func:`torch.ceil`, which rounds up.
        :func:`torch.floor`, which rounds down.
        :func:`torch.trunc`, which rounds towards zero.

    Args:
        input (Tensor): the input tensor.
        decimals (int): Number of decimal places to round to (default: 0).
            If decimals is negative, it specifies the number of positions
            to the left of the decimal point.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
        tensor([ 5., -2.,  9., -8.])

        >>> # Values equidistant from two integers are rounded towards
        >>> # the nearest even value (zero is treated as even)
        >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
        tensor([-0., 0., 2., 2.])

        >>> # A positive decimals argument rounds to that decimal place
        >>> torch.round(torch.tensor([0.1234567]), decimals=3)
        tensor([0.1230])

        >>> # A negative decimals argument rounds to the left of the decimal
        >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
        tensor([1000.])
    """
    ...
@overload
def round_(input: Tensor) -> Tensor: ...
@overload
def round_(input: Tensor, *, decimals: _int) -> Tensor: ...
def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    row_stack(tensors, *, out=None) -> Tensor

    Alias of :func:`torch.vstack`.
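
    Since this is an alias, the short example below simply mirrors the documented
    :func:`torch.vstack` behavior of stacking 1-D tensors as rows:

    Example::

        >>> a = torch.tensor([1, 2, 3])
        >>> b = torch.tensor([4, 5, 6])
        >>> torch.row_stack((a, b))
        tensor([[1, 2, 3],
                [4, 5, 6]])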
    """
    ...
def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    rsqrt(input, *, out=None) -> Tensor

    Returns a new tensor with the reciprocal of the square-root of each of
    the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-0.0370,  0.2970,  1.5420, -0.9105])
        >>> torch.rsqrt(a)
        tensor([    nan,  1.8351,  0.8053,     nan])
    """
    ...
def rsqrt_(input: Tensor) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
@overload
def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ...
def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor:
    r"""
    scatter(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_`.
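
    A minimal illustrative example, following the semantics documented for
    :meth:`torch.Tensor.scatter_` (for ``dim=0``, ``out[index[i][j]][j] = src[i][j]``):

    Example::

        >>> src = torch.arange(1, 11).reshape((2, 5))
        >>> index = torch.tensor([[0, 1, 2, 0]])
        >>> torch.scatter(torch.zeros(3, 5, dtype=src.dtype), 0, index, src)
        tensor([[1, 0, 0, 4, 0],
                [0, 2, 0, 0, 0],
                [0, 0, 3, 0, 0]])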
    """
    ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    scatter(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_`.
    """
    ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor:
    r"""
    scatter(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_`.
    """
    ...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
    r"""
    scatter(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_`.
    """
    ...
@overload
def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    scatter(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_`.
    """
    ...
@overload
def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
    r"""
    scatter(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_`.
    """
    ...
@overload
def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    scatter_add(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_add_`.
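
    A minimal illustrative example, following the semantics documented for
    :meth:`torch.Tensor.scatter_add_` (for ``dim=0``, ``out[index[i][j]][j] += src[i][j]``):

    Example::

        >>> src = torch.ones((2, 5))
        >>> index = torch.tensor([[0, 1, 2, 0, 0]])
        >>> torch.scatter_add(torch.zeros(3, 5), 0, index, src)
        tensor([[1., 0., 0., 1., 1.],
                [0., 1., 0., 0., 0.],
                [0., 0., 1., 0., 0.]])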
    """
    ...
@overload
def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
    r"""
    scatter_add(input, dim, index, src) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_add_`.
    """
    ...
def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
    r"""
    scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor

    Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`.
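
    A minimal illustrative example, following the semantics documented for
    :meth:`torch.Tensor.scatter_reduce_`: elements of ``src`` are reduced into
    ``input`` at the positions given by ``index``, and with the default
    ``include_self=True`` the values already present in ``input`` take part in
    the reduction:

    Example::

        >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
        >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
        >>> input = torch.tensor([1., 2., 3., 4.])
        >>> torch.scatter_reduce(input, 0, index, src, reduce="sum")
        tensor([ 5., 14.,  8.,  4.])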
    """
    ...
@overload
def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor

    Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
    corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
    of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
    Return a new tensor with the same size as :attr:`values`. More formally,
    the returned index satisfies the following rules:

    .. list-table::
       :widths: 12 10 78
       :header-rows: 1

       * - :attr:`sorted_sequence`
         - :attr:`right`
         - *returned index satisfies*
       * - 1-D
         - False
         - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
       * - 1-D
         - True
         - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
       * - N-D
         - False
         - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
       * - N-D
         - True
         - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``

    Args:
        sorted_sequence (Tensor): N-D or 1-D tensor, containing a monotonically increasing sequence on the *innermost*
            dimension unless :attr:`sorter` is provided, in which case the sequence does not
            need to be sorted
        values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).

    Keyword args:
        out_int32 (bool, optional): indicates the output data type. torch.int32 if True, torch.int64 otherwise.
            Default value is False, i.e. the default output data type is torch.int64.
        right (bool, optional): if False, return the first suitable location that is found. If True, return the
            last such index. If no suitable index is found, return 0 for non-numerical values
            (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
            (one past the last index of the *innermost* dimension). In other words, if False,
            gets the lower bound index for each value in :attr:`values` on the corresponding
            *innermost* dimension of :attr:`sorted_sequence`. If True, gets the upper
            bound index instead. Default value is False. :attr:`side` does the same and is
            preferred. It will error if :attr:`side` is set to "left" while this is True.
        side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
            and "right" corresponds to True for :attr:`right`. It will error if this is set to
            "left" while :attr:`right` is True. Default value is None.
        out (Tensor, optional): the output tensor; must be the same size as :attr:`values` if provided.
        sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
            :attr:`sorted_sequence` containing a sequence of indices that sort it in
            ascending order on the innermost dimension

    Example::

        >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
        >>> sorted_sequence
        tensor([[ 1,  3,  5,  7,  9],
                [ 2,  4,  6,  8, 10]])
        >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
        >>> values
        tensor([[3, 6, 9],
                [3, 6, 9]])
        >>> torch.searchsorted(sorted_sequence, values)
        tensor([[1, 3, 4],
                [1, 2, 4]])
        >>> torch.searchsorted(sorted_sequence, values, side='right')
        tensor([[2, 3, 5],
                [1, 3, 4]])

        >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
        >>> sorted_sequence_1d
        tensor([1, 3, 5, 7, 9])
        >>> torch.searchsorted(sorted_sequence_1d, values)
        tensor([[1, 3, 4],
                [1, 3, 4]])
    """
    ...
  16190. @overload
  16191. def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
  16192. r"""
  16193. searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
  16194. Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
  16195. corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
  16196. of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
  16197. Return a new tensor with the same size as :attr:`values`. More formally,
  16198. the returned index satisfies the following rules:
  16199. .. list-table::
  16200. :widths: 12 10 78
  16201. :header-rows: 1
  16202. * - :attr:`sorted_sequence`
  16203. - :attr:`right`
  16204. - *returned index satisfies*
  16205. * - 1-D
  16206. - False
  16207. - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
  16208. * - 1-D
  16209. - True
  16210. - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
  16211. * - N-D
  16212. - False
  16213. - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
  16214. * - N-D
  16215. - True
  16216. - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
  16217. Args:
  16218. sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
  16219. dimension unless :attr:`sorter` is provided, in which case the sequence does not
  16220. need to be sorted
  16221. values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
  16222. Keyword args:
  16223. out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
  16224. Default value is False, i.e. default output data type is torch.int64.
  16225. right (bool, optional): if False, return the first suitable location that is found. If True, return the
  16226. last such index. If no suitable index found, return 0 for non-numerical value
  16227. (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence`
  16228. (one pass the last index of the *innermost* dimension). In other words, if False,
  16229. gets the lower bound index for each value in :attr:`values` on the corresponding
  16230. *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
  16231. bound index instead. Default value is False. :attr:`side` does the same and is
  16232. preferred. It will error if :attr:`side` is set to "left" while this is True.
  16233. side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
  16234. and "right" corresponds to True for :attr:`right`. It will error if this is set to
  16235. "left" while :attr:`right` is True. Default value is None.
  16236. out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
  16237. sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
16238. :attr:`sorted_sequence` containing a sequence of indices that sort it in
16239. ascending order on the innermost dimension.
  16240. Example::
  16241. >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
  16242. >>> sorted_sequence
  16243. tensor([[ 1, 3, 5, 7, 9],
  16244. [ 2, 4, 6, 8, 10]])
  16245. >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
  16246. >>> values
  16247. tensor([[3, 6, 9],
  16248. [3, 6, 9]])
  16249. >>> torch.searchsorted(sorted_sequence, values)
  16250. tensor([[1, 3, 4],
  16251. [1, 2, 4]])
  16252. >>> torch.searchsorted(sorted_sequence, values, side='right')
  16253. tensor([[2, 3, 5],
  16254. [1, 3, 4]])
  16255. >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
  16256. >>> sorted_sequence_1d
  16257. tensor([1, 3, 5, 7, 9])
  16258. >>> torch.searchsorted(sorted_sequence_1d, values)
  16259. tensor([[1, 3, 4],
  16260. [1, 3, 4]])
  16261. """
  16262. ...
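Neither example above exercises :attr:`sorter`. A minimal doctest-style sketch of searching an unsorted sequence through its argsort permutation; the outputs were worked out by hand from the documented semantics, so treat them as illustrative:
>>> import torch
>>> seq = torch.tensor([5, 1, 9, 3, 7])  # unsorted on the innermost dimension
>>> sorter = torch.argsort(seq)          # tensor([1, 3, 0, 4, 2])
>>> # returned indices refer to the sorted view [1, 3, 5, 7, 9]
>>> torch.searchsorted(seq, torch.tensor([3, 6, 9]), sorter=sorter)
tensor([1, 3, 4])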
  16263. def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ...
  16264. @overload
  16265. def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
  16266. r"""
  16267. select(input, dim, index) -> Tensor
  16268. Slices the :attr:`input` tensor along the selected dimension at the given index.
  16269. This function returns a view of the original tensor with the given dimension removed.
  16270. .. note:: If :attr:`input` is a sparse tensor and returning a view of
  16271. the tensor is not possible, a RuntimeError exception is
16272. raised. If this is the case, consider using
16273. the :func:`torch.select_copy` function.
  16274. Args:
  16275. input (Tensor): the input tensor.
  16276. dim (int): the dimension to slice
  16277. index (int): the index to select with
  16278. .. note::
  16279. :meth:`select` is equivalent to slicing. For example,
  16280. ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
  16281. ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
  16282. """
  16283. ...
  16284. @overload
  16285. def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor:
  16286. r"""
  16287. select(input, dim, index) -> Tensor
  16288. Slices the :attr:`input` tensor along the selected dimension at the given index.
  16289. This function returns a view of the original tensor with the given dimension removed.
  16290. .. note:: If :attr:`input` is a sparse tensor and returning a view of
  16291. the tensor is not possible, a RuntimeError exception is
16292. raised. If this is the case, consider using
16293. the :func:`torch.select_copy` function.
  16294. Args:
  16295. input (Tensor): the input tensor.
  16296. dim (int): the dimension to slice
  16297. index (int): the index to select with
  16298. .. note::
  16299. :meth:`select` is equivalent to slicing. For example,
  16300. ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
  16301. ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
  16302. """
  16303. ...
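To make the slicing equivalence in the note concrete, a small hand-checked sketch:
>>> import torch
>>> x = torch.arange(6).reshape(2, 3)  # tensor([[0, 1, 2], [3, 4, 5]])
>>> torch.select(x, 0, 1)              # same view as x[1]
tensor([3, 4, 5])
>>> torch.select(x, 1, 2)              # same view as x[:, 2]
tensor([2, 5])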
  16304. def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor:
  16305. r"""
  16306. Performs the same operation as :func:`torch.select`, but all output tensors
  16307. are freshly created instead of aliasing the input.
  16308. """
  16309. ...
  16310. def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
  16311. r"""
  16312. select_scatter(input, src, dim, index) -> Tensor
  16313. Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
  16314. This function returns a tensor with fresh storage; it does not create a view.
  16315. Args:
  16316. input (Tensor): the input tensor.
  16317. src (Tensor): The tensor to embed into :attr:`input`
  16318. dim (int): the dimension to insert the slice into.
  16319. index (int): the index to select with
  16320. .. note::
  16321. :attr:`src` must be of the proper size in order to be embedded
  16322. into :attr:`input`. Specifically, it should have the same shape as
  16323. ``torch.select(input, dim, index)``
  16324. Example::
  16325. >>> a = torch.zeros(2, 2)
  16326. >>> b = torch.ones(2)
  16327. >>> a.select_scatter(b, 0, 0)
  16328. tensor([[1., 1.],
  16329. [0., 0.]])
  16330. """
  16331. ...
  16332. def selu(input: Tensor) -> Tensor: ...
  16333. def selu_(input: Tensor) -> Tensor: ...
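selu and selu_ carry no docstring in this stub. As a reminder, SELU computes scale * (max(0, x) + min(0, alpha * (exp(x) - 1))) with fixed constants alpha ≈ 1.6733 and scale ≈ 1.0507; a quick sketch (the repr rounds to four decimals):
>>> import torch
>>> torch.selu(torch.tensor([-1.0, 0.0, 1.0]))
tensor([-1.1113,  0.0000,  1.0507])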
  16334. def set_flush_denormal(mode: _bool) -> _bool:
  16335. r"""
  16336. set_flush_denormal(mode) -> bool
16337. Enables or disables flushing of denormal floating-point numbers on the CPU.
16338. Returns ``True`` if your system supports flushing denormal numbers and it
16339. successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
16340. is supported on x86 architectures that support SSE3, and on AArch64.
  16341. Args:
  16342. mode (bool): Controls whether to enable flush denormal mode or not
  16343. Example::
  16344. >>> torch.set_flush_denormal(True)
  16345. True
  16346. >>> torch.tensor([1e-323], dtype=torch.float64)
  16347. tensor([ 0.], dtype=torch.float64)
  16348. >>> torch.set_flush_denormal(False)
  16349. True
  16350. >>> torch.tensor([1e-323], dtype=torch.float64)
  16351. tensor(9.88131e-324 *
  16352. [ 1.0000], dtype=torch.float64)
  16353. """
  16354. ...
  16355. def set_num_interop_threads(num: _int) -> None:
  16356. r"""
  16357. set_num_interop_threads(int)
  16358. Sets the number of threads used for interop parallelism
  16359. (e.g. in JIT interpreter) on CPU.
  16360. .. warning::
  16361. Can only be called once and before any inter-op parallel work
  16362. is started (e.g. JIT execution).
  16363. """
  16364. ...
  16365. def set_num_threads(num: _int) -> None:
  16366. r"""
  16367. set_num_threads(int)
  16368. Sets the number of threads used for intraop parallelism on CPU.
  16369. .. warning::
  16370. To ensure that the correct number of threads is used, set_num_threads
  16371. must be called before running eager, JIT or autograd code.
  16372. """
  16373. ...
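Both thread knobs have matching getters (torch.get_num_interop_threads and torch.get_num_threads). A sketch of the intraop setting round-tripping; per the warnings above, call these early, and the interop setter at most once per process:
>>> import torch
>>> torch.set_num_threads(4)
>>> torch.get_num_threads()
4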
  16374. def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16375. r"""
  16376. sgn(input, *, out=None) -> Tensor
16377. This function is an extension of torch.sign() to complex tensors.
16378. For complex tensors it computes a new tensor whose elements have
16379. the same angles as the corresponding elements of :attr:`input` and
16380. absolute values (i.e. magnitudes) of one; for non-complex tensors
16381. it is equivalent to torch.sign().
  16382. .. math::
  16383. \text{out}_{i} = \begin{cases}
16384. 0 & |\text{input}_i| == 0 \\
16385. \frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
  16386. \end{cases}
  16387. Args:
  16388. input (Tensor): the input tensor.
  16389. Keyword args:
  16390. out (Tensor, optional): the output tensor.
  16391. Example::
  16392. >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
  16393. >>> t.sgn()
  16394. tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
  16395. """
  16396. ...
  16397. def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16398. r"""
  16399. sigmoid(input, *, out=None) -> Tensor
  16400. Alias for :func:`torch.special.expit`.
  16401. """
  16402. ...
  16403. def sigmoid_(input: Tensor) -> Tensor: ...
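Since the alias chain (sigmoid -> special.expit) hides the basic behaviour, a one-line sketch:
>>> import torch
>>> torch.sigmoid(torch.tensor([-1.0, 0.0, 1.0]))
tensor([0.2689, 0.5000, 0.7311])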
  16404. def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16405. r"""
  16406. sign(input, *, out=None) -> Tensor
  16407. Returns a new tensor with the signs of the elements of :attr:`input`.
  16408. .. math::
  16409. \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
  16410. Args:
  16411. input (Tensor): the input tensor.
  16412. Keyword args:
  16413. out (Tensor, optional): the output tensor.
  16414. Example::
  16415. >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
  16416. >>> a
  16417. tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
  16418. >>> torch.sign(a)
  16419. tensor([ 1., -1., 0., 1.])
  16420. """
  16421. ...
  16422. def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16423. r"""
  16424. signbit(input, *, out=None) -> Tensor
  16425. Tests if each element of :attr:`input` has its sign bit set or not.
  16426. Args:
  16427. input (Tensor): the input tensor.
  16428. Keyword args:
  16429. out (Tensor, optional): the output tensor.
  16430. Example::
  16431. >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
  16432. >>> torch.signbit(a)
  16433. tensor([ False, True, False, False])
  16434. >>> a = torch.tensor([-0.0, 0.0])
  16435. >>> torch.signbit(a)
  16436. tensor([ True, False])
  16437. .. note::
  16438. signbit handles signed zeros, so negative zero (-0) returns True.
  16439. """
  16440. ...
  16441. def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16442. r"""
  16443. sin(input, *, out=None) -> Tensor
  16444. Returns a new tensor with the sine of the elements of :attr:`input`.
  16445. .. math::
  16446. \text{out}_{i} = \sin(\text{input}_{i})
  16447. Args:
  16448. input (Tensor): the input tensor.
  16449. Keyword args:
  16450. out (Tensor, optional): the output tensor.
  16451. Example::
  16452. >>> a = torch.randn(4)
  16453. >>> a
  16454. tensor([-0.5461, 0.1347, -2.7266, -0.2746])
  16455. >>> torch.sin(a)
  16456. tensor([-0.5194, 0.1343, -0.4032, -0.2711])
  16457. """
  16458. ...
  16459. def sin_(input: Tensor) -> Tensor: ...
  16460. def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16461. r"""
  16462. sinc(input, *, out=None) -> Tensor
  16463. Alias for :func:`torch.special.sinc`.
  16464. """
  16465. ...
  16466. def sinc_(input: Tensor) -> Tensor: ...
  16467. def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  16468. r"""
  16469. sinh(input, *, out=None) -> Tensor
  16470. Returns a new tensor with the hyperbolic sine of the elements of
  16471. :attr:`input`.
  16472. .. math::
  16473. \text{out}_{i} = \sinh(\text{input}_{i})
  16474. Args:
  16475. input (Tensor): the input tensor.
  16476. Keyword args:
  16477. out (Tensor, optional): the output tensor.
  16478. Example::
  16479. >>> a = torch.randn(4)
  16480. >>> a
  16481. tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
  16482. >>> torch.sinh(a)
  16483. tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
  16484. .. note::
  16485. When :attr:`input` is on the CPU, the implementation of torch.sinh may use
  16486. the Sleef library, which rounds very large results to infinity or negative
  16487. infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
  16488. """
  16489. ...
  16490. def sinh_(input: Tensor) -> Tensor: ...
  16491. def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor:
  16492. r"""
  16493. Performs the same operation as :func:`torch.slice`, but all output tensors
  16494. are freshly created instead of aliasing the input.
  16495. """
  16496. ...
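There is no public Python-level torch.slice to compare against; slice_copy behaves like ordinary step slicing but returns fresh storage. A sketch assuming the positional argument order of the stub signature above:
>>> import torch
>>> x = torch.arange(6)
>>> torch.slice_copy(x, 0, 1, 5, 2)  # like x[1:5:2], but not a view
tensor([1, 3])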
  16497. def slice_inverse(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ...
  16498. def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor:
  16499. r"""
  16500. slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
  16501. Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
  16502. dimension.
  16503. This function returns a tensor with fresh storage; it does not create a view.
  16504. Args:
  16505. input (Tensor): the input tensor.
  16506. src (Tensor): The tensor to embed into :attr:`input`
  16507. dim (int): the dimension to insert the slice into
  16508. start (Optional[int]): the start index of where to insert the slice
  16509. end (Optional[int]): the end index of where to insert the slice
16510. step (int): how many elements to skip in between
  16511. Example::
  16512. >>> a = torch.zeros(8, 8)
  16513. >>> b = torch.ones(2, 8)
  16514. >>> a.slice_scatter(b, start=6)
  16515. tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
  16516. [0., 0., 0., 0., 0., 0., 0., 0.],
  16517. [0., 0., 0., 0., 0., 0., 0., 0.],
  16518. [0., 0., 0., 0., 0., 0., 0., 0.],
  16519. [0., 0., 0., 0., 0., 0., 0., 0.],
  16520. [0., 0., 0., 0., 0., 0., 0., 0.],
  16521. [1., 1., 1., 1., 1., 1., 1., 1.],
  16522. [1., 1., 1., 1., 1., 1., 1., 1.]])
  16523. >>> b = torch.ones(8, 2)
  16524. >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
  16525. tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
  16526. [0., 0., 1., 0., 1., 0., 0., 0.],
  16527. [0., 0., 1., 0., 1., 0., 0., 0.],
  16528. [0., 0., 1., 0., 1., 0., 0., 0.],
  16529. [0., 0., 1., 0., 1., 0., 0., 0.],
  16530. [0., 0., 1., 0., 1., 0., 0., 0.],
  16531. [0., 0., 1., 0., 1., 0., 0., 0.],
  16532. [0., 0., 1., 0., 1., 0., 0., 0.]])
  16533. """
  16534. ...
  16535. def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet:
  16536. r"""
  16537. slogdet(input) -> (Tensor, Tensor)
16538. Alias for :func:`torch.linalg.slogdet`.
  16539. """
  16540. ...
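The alias returns a (sign, logabsdet) pair; the identity matrix makes a trivially checkable sketch:
>>> import torch
>>> sign, logabsdet = torch.slogdet(torch.eye(3))
>>> sign, logabsdet
(tensor(1.), tensor(0.))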
  16541. def smm(input: Tensor, mat2: Tensor) -> Tensor:
  16542. r"""
  16543. smm(input, mat) -> Tensor
  16544. Performs a matrix multiplication of the sparse matrix :attr:`input`
  16545. with the dense matrix :attr:`mat`.
  16546. Args:
  16547. input (Tensor): a sparse matrix to be matrix multiplied
  16548. mat (Tensor): a dense matrix to be matrix multiplied
  16549. """
  16550. ...
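A sketch of smm with a sparse COO identity; the product is assumed here to come back sparse (hence the to_dense() for display), and equals the dense operand:
>>> import torch
>>> s = torch.eye(2).to_sparse()
>>> m = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
>>> torch.smm(s, m).to_dense()
tensor([[1., 2., 3.],
        [4., 5., 6.]])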
  16551. @overload
  16552. def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor:
  16553. r"""
  16554. softmax(input, dim, *, dtype=None) -> Tensor
  16555. Alias for :func:`torch.nn.functional.softmax`.
  16556. """
  16557. ...
  16558. @overload
  16559. def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
  16560. r"""
  16561. softmax(input, dim, *, dtype=None) -> Tensor
  16562. Alias for :func:`torch.nn.functional.softmax`.
  16563. """
  16564. ...
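A constant input makes the normalisation obvious; a deterministic sketch:
>>> import torch
>>> torch.softmax(torch.ones(2, 2), dim=1)
tensor([[0.5000, 0.5000],
        [0.5000, 0.5000]])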
  16565. @overload
  16566. def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
  16567. r"""
  16568. sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
  16569. Sorts the elements of the :attr:`input` tensor along a given dimension
  16570. in ascending order by value.
  16571. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  16572. If :attr:`descending` is ``True`` then the elements are sorted in descending
  16573. order by value.
  16574. If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
  16575. the order of equivalent elements.
  16576. A namedtuple of (values, indices) is returned, where the `values` are the
  16577. sorted values and `indices` are the indices of the elements in the original
  16578. `input` tensor.
  16579. Args:
  16580. input (Tensor): the input tensor.
  16581. dim (int, optional): the dimension to sort along
  16582. descending (bool, optional): controls the sorting order (ascending or descending)
  16583. stable (bool, optional): makes the sorting routine stable, which guarantees that the order
  16584. of equivalent elements is preserved.
  16585. Keyword args:
  16586. out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
  16587. be optionally given to be used as output buffers
  16588. Example::
  16589. >>> x = torch.randn(3, 4)
  16590. >>> sorted, indices = torch.sort(x)
  16591. >>> sorted
  16592. tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
  16593. [-0.5793, 0.0061, 0.6058, 0.9497],
  16594. [-0.5071, 0.3343, 0.9553, 1.0960]])
  16595. >>> indices
  16596. tensor([[ 1, 0, 2, 3],
  16597. [ 3, 1, 0, 2],
  16598. [ 0, 3, 1, 2]])
  16599. >>> sorted, indices = torch.sort(x, 0)
  16600. >>> sorted
  16601. tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
  16602. [ 0.0608, 0.0061, 0.9497, 0.3343],
  16603. [ 0.6058, 0.9553, 1.0960, 2.3332]])
  16604. >>> indices
  16605. tensor([[ 2, 0, 0, 1],
  16606. [ 0, 1, 1, 2],
  16607. [ 1, 2, 2, 0]])
  16608. >>> x = torch.tensor([0, 1] * 9)
  16609. >>> x.sort()
  16610. torch.return_types.sort(
  16611. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16612. indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
  16613. >>> x.sort(stable=True)
  16614. torch.return_types.sort(
  16615. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16616. indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
  16617. """
  16618. ...
  16619. @overload
  16620. def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
  16621. r"""
  16622. sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
  16623. Sorts the elements of the :attr:`input` tensor along a given dimension
  16624. in ascending order by value.
  16625. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  16626. If :attr:`descending` is ``True`` then the elements are sorted in descending
  16627. order by value.
  16628. If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
  16629. the order of equivalent elements.
  16630. A namedtuple of (values, indices) is returned, where the `values` are the
  16631. sorted values and `indices` are the indices of the elements in the original
  16632. `input` tensor.
  16633. Args:
  16634. input (Tensor): the input tensor.
  16635. dim (int, optional): the dimension to sort along
  16636. descending (bool, optional): controls the sorting order (ascending or descending)
  16637. stable (bool, optional): makes the sorting routine stable, which guarantees that the order
  16638. of equivalent elements is preserved.
  16639. Keyword args:
  16640. out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
  16641. be optionally given to be used as output buffers
  16642. Example::
  16643. >>> x = torch.randn(3, 4)
  16644. >>> sorted, indices = torch.sort(x)
  16645. >>> sorted
  16646. tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
  16647. [-0.5793, 0.0061, 0.6058, 0.9497],
  16648. [-0.5071, 0.3343, 0.9553, 1.0960]])
  16649. >>> indices
  16650. tensor([[ 1, 0, 2, 3],
  16651. [ 3, 1, 0, 2],
  16652. [ 0, 3, 1, 2]])
  16653. >>> sorted, indices = torch.sort(x, 0)
  16654. >>> sorted
  16655. tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
  16656. [ 0.0608, 0.0061, 0.9497, 0.3343],
  16657. [ 0.6058, 0.9553, 1.0960, 2.3332]])
  16658. >>> indices
  16659. tensor([[ 2, 0, 0, 1],
  16660. [ 0, 1, 1, 2],
  16661. [ 1, 2, 2, 0]])
  16662. >>> x = torch.tensor([0, 1] * 9)
  16663. >>> x.sort()
  16664. torch.return_types.sort(
  16665. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16666. indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
  16667. >>> x.sort(stable=True)
  16668. torch.return_types.sort(
  16669. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16670. indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
  16671. """
  16672. ...
  16673. @overload
  16674. def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
  16675. r"""
  16676. sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
  16677. Sorts the elements of the :attr:`input` tensor along a given dimension
  16678. in ascending order by value.
  16679. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  16680. If :attr:`descending` is ``True`` then the elements are sorted in descending
  16681. order by value.
  16682. If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
  16683. the order of equivalent elements.
  16684. A namedtuple of (values, indices) is returned, where the `values` are the
  16685. sorted values and `indices` are the indices of the elements in the original
  16686. `input` tensor.
  16687. Args:
  16688. input (Tensor): the input tensor.
  16689. dim (int, optional): the dimension to sort along
  16690. descending (bool, optional): controls the sorting order (ascending or descending)
  16691. stable (bool, optional): makes the sorting routine stable, which guarantees that the order
  16692. of equivalent elements is preserved.
  16693. Keyword args:
  16694. out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
  16695. be optionally given to be used as output buffers
  16696. Example::
  16697. >>> x = torch.randn(3, 4)
  16698. >>> sorted, indices = torch.sort(x)
  16699. >>> sorted
  16700. tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
  16701. [-0.5793, 0.0061, 0.6058, 0.9497],
  16702. [-0.5071, 0.3343, 0.9553, 1.0960]])
  16703. >>> indices
  16704. tensor([[ 1, 0, 2, 3],
  16705. [ 3, 1, 0, 2],
  16706. [ 0, 3, 1, 2]])
  16707. >>> sorted, indices = torch.sort(x, 0)
  16708. >>> sorted
  16709. tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
  16710. [ 0.0608, 0.0061, 0.9497, 0.3343],
  16711. [ 0.6058, 0.9553, 1.0960, 2.3332]])
  16712. >>> indices
  16713. tensor([[ 2, 0, 0, 1],
  16714. [ 0, 1, 1, 2],
  16715. [ 1, 2, 2, 0]])
  16716. >>> x = torch.tensor([0, 1] * 9)
  16717. >>> x.sort()
  16718. torch.return_types.sort(
  16719. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16720. indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
  16721. >>> x.sort(stable=True)
  16722. torch.return_types.sort(
  16723. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16724. indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
  16725. """
  16726. ...
  16727. @overload
  16728. def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
  16729. r"""
  16730. sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
  16731. Sorts the elements of the :attr:`input` tensor along a given dimension
  16732. in ascending order by value.
  16733. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  16734. If :attr:`descending` is ``True`` then the elements are sorted in descending
  16735. order by value.
  16736. If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
  16737. the order of equivalent elements.
  16738. A namedtuple of (values, indices) is returned, where the `values` are the
  16739. sorted values and `indices` are the indices of the elements in the original
  16740. `input` tensor.
  16741. Args:
  16742. input (Tensor): the input tensor.
  16743. dim (int, optional): the dimension to sort along
  16744. descending (bool, optional): controls the sorting order (ascending or descending)
  16745. stable (bool, optional): makes the sorting routine stable, which guarantees that the order
  16746. of equivalent elements is preserved.
  16747. Keyword args:
  16748. out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
  16749. be optionally given to be used as output buffers
  16750. Example::
  16751. >>> x = torch.randn(3, 4)
  16752. >>> sorted, indices = torch.sort(x)
  16753. >>> sorted
  16754. tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
  16755. [-0.5793, 0.0061, 0.6058, 0.9497],
  16756. [-0.5071, 0.3343, 0.9553, 1.0960]])
  16757. >>> indices
  16758. tensor([[ 1, 0, 2, 3],
  16759. [ 3, 1, 0, 2],
  16760. [ 0, 3, 1, 2]])
  16761. >>> sorted, indices = torch.sort(x, 0)
  16762. >>> sorted
  16763. tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
  16764. [ 0.0608, 0.0061, 0.9497, 0.3343],
  16765. [ 0.6058, 0.9553, 1.0960, 2.3332]])
  16766. >>> indices
  16767. tensor([[ 2, 0, 0, 1],
  16768. [ 0, 1, 1, 2],
  16769. [ 1, 2, 2, 0]])
  16770. >>> x = torch.tensor([0, 1] * 9)
  16771. >>> x.sort()
  16772. torch.return_types.sort(
  16773. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16774. indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
  16775. >>> x.sort(stable=True)
  16776. torch.return_types.sort(
  16777. values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
  16778. indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
  16779. """
  16780. ...
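The shared example relies on torch.randn; a fully deterministic sketch of descending order:
>>> import torch
>>> torch.sort(torch.tensor([3, 1, 2]), descending=True)
torch.return_types.sort(
values=tensor([3, 2, 1]),
indices=tensor([0, 2, 1]))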
  16781. def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
  16782. r"""
  16783. sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
16784. Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
16785. Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
16786. given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
16787. multiplication operations in BSC format are typically faster than those
16788. for sparse tensors in COO format. Make sure you have a look at :ref:`the
16789. note on the data type of the indices <sparse-bsc-docs>`.
  16790. .. note::
  16791. If the ``device`` argument is not specified the device of the given
  16792. :attr:`values` and indices tensor(s) must match. If, however, the
  16793. argument is specified the input Tensors will be converted to the
  16794. given device and in turn determine the device of the constructed
  16795. sparse tensor.
  16796. Args:
  16797. ccol_indices (array_like): (B+1)-dimensional array of size
  16798. ``(*batchsize, ncolblocks + 1)``. The last element of each
  16799. batch is the number of non-zeros. This tensor encodes the
  16800. index in values and row_indices depending on where the given
  16801. column starts. Each successive number in the tensor subtracted
  16802. by the number before it denotes the number of elements in a
  16803. given column.
  16804. row_indices (array_like): Row block co-ordinates of each block in
  16805. values. (B+1)-dimensional tensor with the same length
  16806. as values.
16807. values (array_like): Initial blocks for the tensor. Can be a list,
16808. tuple, NumPy ``ndarray``, and other types that
16809. represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
  16810. number of dense dimensions.
  16811. size (list, tuple, :class:`torch.Size`, optional): Size of the
  16812. sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
  16813. blocksize[1], *densesize)`` If not provided, the size will be
  16814. inferred as the minimum size big enough to hold all non-zero
  16815. blocks.
  16816. Keyword args:
  16817. dtype (:class:`torch.dtype`, optional): the desired data type of
  16818. returned tensor. Default: if None, infers data type from
  16819. :attr:`values`.
  16820. device (:class:`torch.device`, optional): the desired device of
  16821. returned tensor. Default: if None, uses the current device
  16822. for the default tensor type (see
  16823. :func:`torch.set_default_device`). :attr:`device` will be
  16824. the CPU for CPU tensor types and the current CUDA device for
  16825. CUDA tensor types.
  16826. requires_grad (bool, optional): If autograd should record operations on the
  16827. returned tensor. Default: ``False``.
  16828. check_invariants (bool, optional): If sparse tensor invariants are checked.
  16829. Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
  16830. initially False.
  16831. Example::
  16832. >>> ccol_indices = [0, 1, 2]
  16833. >>> row_indices = [0, 1]
  16834. >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
  16835. >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
  16836. ... torch.tensor(row_indices, dtype=torch.int64),
  16837. ... torch.tensor(values), dtype=torch.double)
  16838. tensor(ccol_indices=tensor([0, 1, 2]),
  16839. row_indices=tensor([0, 1]),
  16840. values=tensor([[[1., 2.],
  16841. [3., 4.]],
  16842. [[5., 6.],
  16843. [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
  16844. layout=torch.sparse_bsc)
  16845. """
  16846. ...
  16847. def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
  16848. r"""
  16849. sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
16850. Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
16851. <sparse-bsr-docs>` with specified 2-dimensional blocks at the given
16852. :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
16853. multiplication operations in BSR format are typically faster than those
16854. for sparse tensors in COO format. Make sure you have a look at :ref:`the
16855. note on the data type of the indices <sparse-bsr-docs>`.
  16856. .. note::
  16857. If the ``device`` argument is not specified the device of the given
  16858. :attr:`values` and indices tensor(s) must match. If, however, the
  16859. argument is specified the input Tensors will be converted to the
  16860. given device and in turn determine the device of the constructed
  16861. sparse tensor.
  16862. Args:
  16863. crow_indices (array_like): (B+1)-dimensional array of size
  16864. ``(*batchsize, nrowblocks + 1)``. The last element of each
  16865. batch is the number of non-zeros. This tensor encodes the
  16866. block index in values and col_indices depending on where the
  16867. given row block starts. Each successive number in the tensor
  16868. subtracted by the number before it denotes the number of
  16869. blocks in a given row.
  16870. col_indices (array_like): Column block co-ordinates of each block
  16871. in values. (B+1)-dimensional tensor with the same length as
  16872. values.
16873. values (array_like): Initial values for the tensor. Can be a list,
16874. tuple, NumPy ``ndarray``, scalar, and other types that
16875. represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
  16876. number of dense dimensions.
  16877. size (list, tuple, :class:`torch.Size`, optional): Size of the
  16878. sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
  16879. blocksize[1], *densesize)`` where ``blocksize ==
  16880. values.shape[1:3]``. If not provided, the size will be
  16881. inferred as the minimum size big enough to hold all non-zero
  16882. blocks.
  16883. Keyword args:
  16884. dtype (:class:`torch.dtype`, optional): the desired data type of
  16885. returned tensor. Default: if None, infers data type from
  16886. :attr:`values`.
  16887. device (:class:`torch.device`, optional): the desired device of
  16888. returned tensor. Default: if None, uses the current device
  16889. for the default tensor type (see
  16890. :func:`torch.set_default_device`). :attr:`device` will be
  16891. the CPU for CPU tensor types and the current CUDA device for
  16892. CUDA tensor types.
  16893. requires_grad (bool, optional): If autograd should record operations on the
  16894. returned tensor. Default: ``False``.
  16895. check_invariants (bool, optional): If sparse tensor invariants are checked.
  16896. Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
  16897. initially False.
  16898. Example::
  16899. >>> crow_indices = [0, 1, 2]
  16900. >>> col_indices = [0, 1]
  16901. >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
  16902. >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
  16903. ... torch.tensor(col_indices, dtype=torch.int64),
  16904. ... torch.tensor(values), dtype=torch.double)
  16905. tensor(crow_indices=tensor([0, 1, 2]),
  16906. col_indices=tensor([0, 1]),
  16907. values=tensor([[[1., 2.],
  16908. [3., 4.]],
  16909. [[5., 6.],
  16910. [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
  16911. layout=torch.sparse_bsr)
  16912. """
  16913. ...
  16914. def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
  16915. r"""
  16916. sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, *, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
  16917. Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
  16918. CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
  16919. the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
  16920. matrix multiplication operations in Compressed Sparse format are
16921. typically faster than those for sparse tensors in COO format. Make sure
16922. you have a look at :ref:`the note on the data type of the indices
  16923. <sparse-compressed-docs>`.
  16924. .. note::
  16925. If the ``device`` argument is not specified the device of the given
  16926. :attr:`values` and indices tensor(s) must match. If, however, the
  16927. argument is specified the input Tensors will be converted to the
  16928. given device and in turn determine the device of the constructed
  16929. sparse tensor.
  16930. Args:
  16931. compressed_indices (array_like): (B+1)-dimensional array of size
  16932. ``(*batchsize, compressed_dim_size + 1)``. The last element of
  16933. each batch is the number of non-zero elements or blocks. This
  16934. tensor encodes the index in ``values`` and ``plain_indices``
  16935. depending on where the given compressed dimension (row or
  16936. column) starts. Each successive number in the tensor
  16937. subtracted by the number before it denotes the number of
  16938. elements or blocks in a given compressed dimension.
  16939. plain_indices (array_like): Plain dimension (column or row)
  16940. co-ordinates of each element or block in values. (B+1)-dimensional
  16941. tensor with the same length as values.
16942. values (array_like): Initial values for the tensor. Can be a list,
16943. tuple, NumPy ``ndarray``, scalar, and other types that
16944. represent a (1+K)-dimensional (for CSR and CSC layouts) or
  16945. (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
  16946. ``K`` is the number of dense dimensions.
  16947. size (list, tuple, :class:`torch.Size`, optional): Size of the
  16948. sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
  16949. blocksize[1], *densesize)`` where ``blocksize[0] ==
  16950. blocksize[1] == 1`` for CSR and CSC formats. If not provided,
  16951. the size will be inferred as the minimum size big enough to
  16952. hold all non-zero elements or blocks.
  16953. Keyword args:
  16954. dtype (:class:`torch.dtype`, optional): the desired data type of
  16955. returned tensor. Default: if None, infers data type from
  16956. :attr:`values`.
  16957. layout (:class:`torch.layout`, required): the desired layout of
  16958. returned tensor: :attr:`torch.sparse_csr`,
  16959. :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
  16960. :attr:`torch.sparse_bsc`.
  16961. device (:class:`torch.device`, optional): the desired device of
  16962. returned tensor. Default: if None, uses the current device
  16963. for the default tensor type (see
  16964. :func:`torch.set_default_device`). :attr:`device` will be
  16965. the CPU for CPU tensor types and the current CUDA device for
  16966. CUDA tensor types.
  16967. requires_grad (bool, optional): If autograd should record operations on the
  16968. returned tensor. Default: ``False``.
  16969. check_invariants (bool, optional): If sparse tensor invariants are checked.
  16970. Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
  16971. initially False.
  16972. Example::
  16973. >>> compressed_indices = [0, 2, 4]
  16974. >>> plain_indices = [0, 1, 0, 1]
  16975. >>> values = [1, 2, 3, 4]
  16976. >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
  16977. ... torch.tensor(plain_indices, dtype=torch.int64),
  16978. ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
  16979. tensor(crow_indices=tensor([0, 2, 4]),
  16980. col_indices=tensor([0, 1, 0, 1]),
  16981. values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
  16982. dtype=torch.float64, layout=torch.sparse_csr)
  16983. """
  16984. ...
  16985. def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor:
  16986. r"""
  16987. sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
  16988. Constructs a :ref:`sparse tensor in COO(rdinate) format
  16989. <sparse-coo-docs>` with specified values at the given
  16990. :attr:`indices`.
  16991. .. note::
  16992. This function returns an :ref:`uncoalesced tensor
  16993. <sparse-uncoalesced-coo-docs>` when :attr:`is_coalesced` is
  16994. unspecified or ``None``.
  16995. .. note::
  16996. If the ``device`` argument is not specified the device of the given
  16997. :attr:`values` and indices tensor(s) must match. If, however, the
  16998. argument is specified the input Tensors will be converted to the
  16999. given device and in turn determine the device of the constructed
  17000. sparse tensor.
  17001. Args:
  17002. indices (array_like): Initial data for the tensor. Can be a list, tuple,
  17003. NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
  17004. internally. The indices are the coordinates of the non-zero values in the matrix, and thus
  17005. should be two-dimensional where the first dimension is the number of tensor dimensions and
  17006. the second dimension is the number of non-zero values.
  17007. values (array_like): Initial values for the tensor. Can be a list, tuple,
  17008. NumPy ``ndarray``, scalar, and other types.
  17009. size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
  17010. provided the size will be inferred as the minimum size big enough to hold all non-zero
  17011. elements.
  17012. Keyword args:
  17013. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  17014. Default: if None, infers data type from :attr:`values`.
  17015. device (:class:`torch.device`, optional): the desired device of returned tensor.
  17016. Default: if None, uses the current device for the default tensor type
  17017. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  17018. for CPU tensor types and the current CUDA device for CUDA tensor types.
  17019. requires_grad (bool, optional): If autograd should record operations on the
  17020. returned tensor. Default: ``False``.
  17021. check_invariants (bool, optional): If sparse tensor invariants are checked.
  17022. Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
  17023. initially False.
17024. is_coalesced (bool, optional): When ``True``, the caller is
17025. responsible for providing tensor indices that correspond to a
17026. coalesced tensor. If the :attr:`check_invariants` flag is
17027. False, no error will be raised if the prerequisites are not
17028. met and this will lead to silently incorrect results. To force
17029. coalescence please use :meth:`coalesce` on the resulting
17030. Tensor.
17031. Default: ``None``; except for trivial cases (e.g. nnz < 2) the
17032. resulting Tensor has is_coalesced set to ``False``.
  17033. Example::
  17034. >>> i = torch.tensor([[0, 1, 1],
  17035. ... [2, 0, 2]])
  17036. >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
  17037. >>> torch.sparse_coo_tensor(i, v, [2, 4])
  17038. tensor(indices=tensor([[0, 1, 1],
  17039. [2, 0, 2]]),
  17040. values=tensor([3., 4., 5.]),
  17041. size=(2, 4), nnz=3, layout=torch.sparse_coo)
  17042. >>> torch.sparse_coo_tensor(i, v) # Shape inference
  17043. tensor(indices=tensor([[0, 1, 1],
  17044. [2, 0, 2]]),
  17045. values=tensor([3., 4., 5.]),
  17046. size=(2, 3), nnz=3, layout=torch.sparse_coo)
  17047. >>> torch.sparse_coo_tensor(i, v, [2, 4],
  17048. ... dtype=torch.float64,
  17049. ... device=torch.device('cuda:0'))
  17050. tensor(indices=tensor([[0, 1, 1],
  17051. [2, 0, 2]]),
  17052. values=tensor([3., 4., 5.]),
  17053. device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
  17054. layout=torch.sparse_coo)
  17055. # Create an empty sparse tensor with the following invariants:
  17056. # 1. sparse_dim + dense_dim = len(SparseTensor.shape)
  17057. # 2. SparseTensor._indices().shape = (sparse_dim, nnz)
  17058. # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
  17059. #
  17060. # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
  17061. # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
17062. >>> torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
  17063. tensor(indices=tensor([], size=(1, 0)),
  17064. values=tensor([], size=(0,)),
  17065. size=(1,), nnz=0, layout=torch.sparse_coo)
  17066. # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
  17067. # sparse_dim = 1
17068. >>> torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
  17069. tensor(indices=tensor([], size=(1, 0)),
  17070. values=tensor([], size=(0, 2)),
  17071. size=(1, 2), nnz=0, layout=torch.sparse_coo)
  17072. .. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
  17073. """
  17074. ...
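Construction leaves duplicate coordinates in place while is_coalesced is unset (first note above); summation only happens on an explicit coalesce. A sketch with one repeated entry:
>>> import torch
>>> i = torch.tensor([[0, 0], [1, 1]])  # the coordinate (0, 1) appears twice
>>> t = torch.sparse_coo_tensor(i, torch.tensor([1., 2.]), [2, 2])
>>> t.coalesce().values()               # duplicates are summed
tensor([3.])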
  17075. def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
  17076. r"""
  17077. sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
  17078. Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
  17079. <sparse-csc-docs>` with specified values at the given
  17080. :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
17081. multiplication operations in CSC format are typically faster than those
17082. for sparse tensors in COO format. Make sure you have a look at :ref:`the
  17083. note on the data type of the indices <sparse-csc-docs>`.
  17084. .. note::
  17085. If the ``device`` argument is not specified the device of the given
  17086. :attr:`values` and indices tensor(s) must match. If, however, the
  17087. argument is specified the input Tensors will be converted to the
  17088. given device and in turn determine the device of the constructed
  17089. sparse tensor.
  17090. Args:
  17091. ccol_indices (array_like): (B+1)-dimensional array of size
  17092. ``(*batchsize, ncols + 1)``. The last element of each batch
  17093. is the number of non-zeros. This tensor encodes the index in
  17094. values and row_indices depending on where the given column
  17095. starts. Each successive number in the tensor subtracted by the
  17096. number before it denotes the number of elements in a given
  17097. column.
  17098. row_indices (array_like): Row co-ordinates of each element in
  17099. values. (B+1)-dimensional tensor with the same length as
  17100. values.
17101. values (array_like): Initial values for the tensor. Can be a list,
17102. tuple, NumPy ``ndarray``, scalar, and other types that
17103. represent a (1+K)-dimensional tensor where ``K`` is the number
  17104. of dense dimensions.
  17105. size (list, tuple, :class:`torch.Size`, optional): Size of the
  17106. sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
  17107. not provided, the size will be inferred as the minimum size
  17108. big enough to hold all non-zero elements.
  17109. Keyword args:
  17110. dtype (:class:`torch.dtype`, optional): the desired data type of
  17111. returned tensor. Default: if None, infers data type from
  17112. :attr:`values`.
  17113. device (:class:`torch.device`, optional): the desired device of
  17114. returned tensor. Default: if None, uses the current device
  17115. for the default tensor type (see
  17116. :func:`torch.set_default_device`). :attr:`device` will be
  17117. the CPU for CPU tensor types and the current CUDA device for
  17118. CUDA tensor types.
  17119. requires_grad (bool, optional): If autograd should record operations on the
  17120. returned tensor. Default: ``False``.
  17121. check_invariants (bool, optional): If sparse tensor invariants are checked.
  17122. Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
  17123. initially False.
  17124. Example::
  17125. >>> ccol_indices = [0, 2, 4]
  17126. >>> row_indices = [0, 1, 0, 1]
  17127. >>> values = [1, 2, 3, 4]
  17128. >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
  17129. ... torch.tensor(row_indices, dtype=torch.int64),
  17130. ... torch.tensor(values), dtype=torch.double)
  17131. tensor(ccol_indices=tensor([0, 2, 4]),
  17132. row_indices=tensor([0, 1, 0, 1]),
  17133. values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
  17134. dtype=torch.float64, layout=torch.sparse_csc)
  17135. """
  17136. ...
  17137. def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
  17138. r"""
  17139. sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
  17140. Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
  17141. values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
17142. in CSR format are typically faster than those for sparse tensors in COO format. Make sure you have a look
  17143. at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
  17144. .. note::
  17145. If the ``device`` argument is not specified the device of the given
  17146. :attr:`values` and indices tensor(s) must match. If, however, the
  17147. argument is specified the input Tensors will be converted to the
  17148. given device and in turn determine the device of the constructed
  17149. sparse tensor.
  17150. Args:
  17151. crow_indices (array_like): (B+1)-dimensional array of size
  17152. ``(*batchsize, nrows + 1)``. The last element of each batch
  17153. is the number of non-zeros. This tensor encodes the index in
  17154. values and col_indices depending on where the given row
  17155. starts. Each successive number in the tensor subtracted by the
  17156. number before it denotes the number of elements in a given
  17157. row.
  17158. col_indices (array_like): Column co-ordinates of each element in
  17159. values. (B+1)-dimensional tensor with the same length
  17160. as values.
17161. values (array_like): Initial values for the tensor. Can be a list,
17162. tuple, NumPy ``ndarray``, scalar, and other types that
17163. represent a (1+K)-dimensional tensor where ``K`` is the number
  17164. of dense dimensions.
  17165. size (list, tuple, :class:`torch.Size`, optional): Size of the
  17166. sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
  17167. not provided, the size will be inferred as the minimum size
  17168. big enough to hold all non-zero elements.
  17169. Keyword args:
  17170. dtype (:class:`torch.dtype`, optional): the desired data type of
  17171. returned tensor. Default: if None, infers data type from
  17172. :attr:`values`.
  17173. device (:class:`torch.device`, optional): the desired device of
  17174. returned tensor. Default: if None, uses the current device
  17175. for the default tensor type (see
  17176. :func:`torch.set_default_device`). :attr:`device` will be
  17177. the CPU for CPU tensor types and the current CUDA device for
  17178. CUDA tensor types.
  17179. requires_grad (bool, optional): If autograd should record operations on the
  17180. returned tensor. Default: ``False``.
  17181. check_invariants (bool, optional): If sparse tensor invariants are checked.
  17182. Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
  17183. initially False.
  17184. Example::
  17185. >>> crow_indices = [0, 2, 4]
  17186. >>> col_indices = [0, 1, 0, 1]
  17187. >>> values = [1, 2, 3, 4]
  17188. >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
  17189. ... torch.tensor(col_indices, dtype=torch.int64),
  17190. ... torch.tensor(values), dtype=torch.double)
  17191. tensor(crow_indices=tensor([0, 2, 4]),
  17192. col_indices=tensor([0, 1, 0, 1]),
  17193. values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
  17194. dtype=torch.float64, layout=torch.sparse_csr)
  17195. """
  17196. ...
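Going the other way, the compressed layout of a CSR tensor can be inspected after converting a dense matrix; a hand-checked sketch:
>>> import torch
>>> sp = torch.tensor([[1., 0.], [0., 2.]]).to_sparse_csr()
>>> sp.crow_indices(), sp.col_indices(), sp.values()
(tensor([0, 1, 2]), tensor([0, 1]), tensor([1., 2.]))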
  17197. def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
  17198. r"""
  17199. Performs the same operation as :func:`torch.split`, but all output tensors
  17200. are freshly created instead of aliasing the input.
  17201. """
  17202. ...
  17203. def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
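split_with_sizes is undocumented here; unlike torch.split with an int, the given sizes must sum exactly to the length of the split dimension. A sketch:
>>> import torch
>>> torch.split_with_sizes(torch.arange(6), [2, 4])
(tensor([0, 1]), tensor([2, 3, 4, 5]))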
  17204. def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
  17205. r"""
  17206. Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
  17207. are freshly created instead of aliasing the input.
  17208. """
  17209. ...
  17210. def spmm(input: Tensor, mat2: Tensor) -> Tensor: ...
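spmm is likewise undocumented; it multiplies a sparse COO matrix by a dense one and, unlike smm above, is expected to return a dense result. A sketch reusing the identity trick:
>>> import torch
>>> torch.spmm(torch.eye(2).to_sparse(), torch.tensor([[1., 2.], [3., 4.]]))
tensor([[1., 2.],
        [3., 4.]])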
  17211. def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  17212. r"""
  17213. sqrt(input, *, out=None) -> Tensor
  17214. Returns a new tensor with the square-root of the elements of :attr:`input`.
  17215. .. math::
  17216. \text{out}_{i} = \sqrt{\text{input}_{i}}
  17217. Args:
  17218. input (Tensor): the input tensor.
  17219. Keyword args:
  17220. out (Tensor, optional): the output tensor.
  17221. Example::
  17222. >>> a = torch.randn(4)
  17223. >>> a
  17224. tensor([-2.0755, 1.0226, 0.0831, 0.4806])
  17225. >>> torch.sqrt(a)
  17226. tensor([ nan, 1.0112, 0.2883, 0.6933])
  17227. """
  17228. ...
  17229. def sqrt_(input: Tensor) -> Tensor: ...
  17230. def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  17231. r"""
  17232. square(input, *, out=None) -> Tensor
  17233. Returns a new tensor with the square of the elements of :attr:`input`.
  17234. Args:
  17235. input (Tensor): the input tensor.
  17236. Keyword args:
  17237. out (Tensor, optional): the output tensor.
  17238. Example::
  17239. >>> a = torch.randn(4)
  17240. >>> a
  17241. tensor([-2.0755, 1.0226, 0.0831, 0.4806])
  17242. >>> torch.square(a)
  17243. tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
  17244. """
  17245. ...
  17246. def square_(input: Tensor) -> Tensor: ...
  17247. @overload
  17248. def squeeze(input: Tensor) -> Tensor:
  17249. r"""
  17250. squeeze(input, dim=None) -> Tensor
  17251. Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
  17252. For example, if `input` is of shape:
  17253. :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
  17254. will be of shape: :math:`(A \times B \times C \times D)`.
  17255. When :attr:`dim` is given, a squeeze operation is done only in the given
  17256. dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
  17257. ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
  17258. will squeeze the tensor to the shape :math:`(A \times B)`.
  17259. .. note:: The returned tensor shares the storage with the input tensor,
  17260. so changing the contents of one will change the contents of the other.
  17261. .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
  17262. will also remove the batch dimension, which can lead to unexpected
  17263. errors. Consider specifying only the dims you wish to be squeezed.
  17264. Args:
  17265. input (Tensor): the input tensor.
  17266. dim (int or tuple of ints, optional): if given, the input will be squeezed
  17267. only in the specified dimensions.
  17268. .. versionchanged:: 2.0
  17269. :attr:`dim` now accepts tuples of dimensions.
  17270. Example::
  17271. >>> x = torch.zeros(2, 1, 2, 1, 2)
  17272. >>> x.size()
  17273. torch.Size([2, 1, 2, 1, 2])
  17274. >>> y = torch.squeeze(x)
  17275. >>> y.size()
  17276. torch.Size([2, 2, 2])
  17277. >>> y = torch.squeeze(x, 0)
  17278. >>> y.size()
  17279. torch.Size([2, 1, 2, 1, 2])
  17280. >>> y = torch.squeeze(x, 1)
  17281. >>> y.size()
  17282. torch.Size([2, 2, 1, 2])
17283. >>> torch.squeeze(x, (1, 2, 3)).size()
17284. torch.Size([2, 2, 2])
  17285. """
  17286. ...
  17287. @overload
  17288. def squeeze(input: Tensor, dim: _int) -> Tensor:
  17289. r"""
  17290. squeeze(input, dim=None) -> Tensor
  17291. Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
  17292. For example, if `input` is of shape:
  17293. :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
  17294. will be of shape: :math:`(A \times B \times C \times D)`.
  17295. When :attr:`dim` is given, a squeeze operation is done only in the given
  17296. dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
  17297. ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
  17298. will squeeze the tensor to the shape :math:`(A \times B)`.
  17299. .. note:: The returned tensor shares the storage with the input tensor,
  17300. so changing the contents of one will change the contents of the other.
  17301. .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
  17302. will also remove the batch dimension, which can lead to unexpected
  17303. errors. Consider specifying only the dims you wish to be squeezed.
  17304. Args:
  17305. input (Tensor): the input tensor.
  17306. dim (int or tuple of ints, optional): if given, the input will be squeezed
  17307. only in the specified dimensions.
  17308. .. versionchanged:: 2.0
  17309. :attr:`dim` now accepts tuples of dimensions.
  17310. Example::
  17311. >>> x = torch.zeros(2, 1, 2, 1, 2)
  17312. >>> x.size()
  17313. torch.Size([2, 1, 2, 1, 2])
  17314. >>> y = torch.squeeze(x)
  17315. >>> y.size()
  17316. torch.Size([2, 2, 2])
  17317. >>> y = torch.squeeze(x, 0)
  17318. >>> y.size()
  17319. torch.Size([2, 1, 2, 1, 2])
  17320. >>> y = torch.squeeze(x, 1)
  17321. >>> y.size()
  17322. torch.Size([2, 2, 1, 2])
  17323. >>> y = torch.squeeze(x, (1, 2, 3))
  17324. torch.Size([2, 2, 2])
  17325. """
  17326. ...
  17327. @overload
  17328. def squeeze(input: Tensor, dim: _size) -> Tensor:
  17329. r"""
  17330. squeeze(input, dim=None) -> Tensor
  17331. Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
  17332. For example, if `input` is of shape:
  17333. :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
  17334. will be of shape: :math:`(A \times B \times C \times D)`.
  17335. When :attr:`dim` is given, a squeeze operation is done only in the given
  17336. dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
  17337. ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
  17338. will squeeze the tensor to the shape :math:`(A \times B)`.
  17339. .. note:: The returned tensor shares the storage with the input tensor,
  17340. so changing the contents of one will change the contents of the other.
  17341. .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
  17342. will also remove the batch dimension, which can lead to unexpected
  17343. errors. Consider specifying only the dims you wish to be squeezed.
  17344. Args:
  17345. input (Tensor): the input tensor.
  17346. dim (int or tuple of ints, optional): if given, the input will be squeezed
  17347. only in the specified dimensions.
  17348. .. versionchanged:: 2.0
  17349. :attr:`dim` now accepts tuples of dimensions.
  17350. Example::
  17351. >>> x = torch.zeros(2, 1, 2, 1, 2)
  17352. >>> x.size()
  17353. torch.Size([2, 1, 2, 1, 2])
  17354. >>> y = torch.squeeze(x)
  17355. >>> y.size()
  17356. torch.Size([2, 2, 2])
  17357. >>> y = torch.squeeze(x, 0)
  17358. >>> y.size()
  17359. torch.Size([2, 1, 2, 1, 2])
  17360. >>> y = torch.squeeze(x, 1)
  17361. >>> y.size()
  17362. torch.Size([2, 2, 1, 2])
  17363. >>> y = torch.squeeze(x, (1, 2, 3))
  17364. torch.Size([2, 2, 2])
  17365. """
  17366. ...
  17367. @overload
  17368. def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor:
  17369. r"""
  17370. squeeze(input, dim=None) -> Tensor
  17371. Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
  17372. For example, if `input` is of shape:
  17373. :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
  17374. will be of shape: :math:`(A \times B \times C \times D)`.
  17375. When :attr:`dim` is given, a squeeze operation is done only in the given
  17376. dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
  17377. ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
  17378. will squeeze the tensor to the shape :math:`(A \times B)`.
  17379. .. note:: The returned tensor shares the storage with the input tensor,
  17380. so changing the contents of one will change the contents of the other.
  17381. .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
  17382. will also remove the batch dimension, which can lead to unexpected
  17383. errors. Consider specifying only the dims you wish to be squeezed.
  17384. Args:
  17385. input (Tensor): the input tensor.
  17386. dim (int or tuple of ints, optional): if given, the input will be squeezed
  17387. only in the specified dimensions.
  17388. .. versionchanged:: 2.0
  17389. :attr:`dim` now accepts tuples of dimensions.
  17390. Example::
  17391. >>> x = torch.zeros(2, 1, 2, 1, 2)
  17392. >>> x.size()
  17393. torch.Size([2, 1, 2, 1, 2])
  17394. >>> y = torch.squeeze(x)
  17395. >>> y.size()
  17396. torch.Size([2, 2, 2])
  17397. >>> y = torch.squeeze(x, 0)
  17398. >>> y.size()
  17399. torch.Size([2, 1, 2, 1, 2])
  17400. >>> y = torch.squeeze(x, 1)
  17401. >>> y.size()
  17402. torch.Size([2, 2, 1, 2])
  17403. >>> y = torch.squeeze(x, (1, 2, 3))
  17404. torch.Size([2, 2, 2])
  17405. """
  17406. ...
  17407. @overload
  17408. def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  17409. r"""
  17410. Performs the same operation as :func:`torch.squeeze`, but all output tensors
  17411. are freshly created instead of aliasing the input.
  17412. """
  17413. ...
  17414. @overload
  17415. def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
  17416. r"""
  17417. Performs the same operation as :func:`torch.squeeze`, but all output tensors
  17418. are freshly created instead of aliasing the input.
  17419. """
  17420. ...
  17421. @overload
  17422. def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor:
  17423. r"""
  17424. Performs the same operation as :func:`torch.squeeze`, but all output tensors
  17425. are freshly created instead of aliasing the input.
  17426. """
  17427. ...
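# Illustrative sketch of the copy semantics described above (hypothetical values;
# assumes the default float dtype):
#
#   >>> x = torch.zeros(2, 1, 2)
#   >>> y = torch.squeeze_copy(x)        # same result shape as torch.squeeze(x)
#   >>> y.size()
#   torch.Size([2, 2])
#   >>> x[0, 0, 0] = 1.0                 # y is freshly allocated, so it is unaffected
#   >>> y[0, 0]
#   tensor(0.)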
@overload
def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor:
    r"""
    sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

    Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
    :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.

    Note: This function is equivalent to :func:`torch.addmm`, except
    :attr:`input` and :attr:`mat1` are sparse.

    Args:
        input (Tensor): a sparse matrix to be added
        mat1 (Tensor): a sparse matrix to be matrix multiplied
        mat2 (Tensor): a dense matrix to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.
    """
    ...
@overload
def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

    Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
    :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.

    Note: This function is equivalent to :func:`torch.addmm`, except
    :attr:`input` and :attr:`mat1` are sparse.

    Args:
        input (Tensor): a sparse matrix to be added
        mat1 (Tensor): a sparse matrix to be matrix multiplied
        mat2 (Tensor): a dense matrix to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.
    """
    ...
@overload
def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor:
    r"""
    sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

    Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
    :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.

    Note: This function is equivalent to :func:`torch.addmm`, except
    :attr:`input` and :attr:`mat1` are sparse.

    Args:
        input (Tensor): a sparse matrix to be added
        mat1 (Tensor): a sparse matrix to be matrix multiplied
        mat2 (Tensor): a dense matrix to be matrix multiplied

    Keyword args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
        out (Tensor, optional): the output tensor.
    """
    ...
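# A small illustrative sketch with sparse COO inputs; the identity-like matrix
# here is hypothetical example data, not from the upstream docstring:
#
#   >>> i = torch.tensor([[0, 1], [0, 1]])
#   >>> v = torch.tensor([1.0, 1.0])
#   >>> sp = torch.sparse_coo_tensor(i, v, (2, 2))   # sparse 2x2 identity
#   >>> dense = torch.ones(2, 2)
#   >>> torch.sspaddmm(sp, sp, dense).to_dense()     # sp + sp @ dense
#   tensor([[2., 1.],
#           [1., 2.]])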
def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    stack(tensors, dim=0, *, out=None) -> Tensor

    Concatenates a sequence of tensors along a new dimension.

    All tensors need to be of the same size.

    .. seealso::

       :func:`torch.cat` concatenates the given sequence along an existing dimension.

    Arguments:
        tensors (sequence of Tensors): sequence of tensors to concatenate
        dim (int, optional): dimension to insert. Has to be between 0 and the number
            of dimensions of concatenated tensors (inclusive). Default: 0

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> x = torch.randn(2, 3)
        >>> x
        tensor([[ 0.3367,  0.1288,  0.2345],
                [ 0.2303, -1.1229, -0.1863]])
        >>> torch.stack((x, x))  # same as torch.stack((x, x), dim=0)
        tensor([[[ 0.3367,  0.1288,  0.2345],
                 [ 0.2303, -1.1229, -0.1863]],
                [[ 0.3367,  0.1288,  0.2345],
                 [ 0.2303, -1.1229, -0.1863]]])
        >>> torch.stack((x, x)).size()
        torch.Size([2, 2, 3])
        >>> torch.stack((x, x), dim=1)
        tensor([[[ 0.3367,  0.1288,  0.2345],
                 [ 0.3367,  0.1288,  0.2345]],
                [[ 0.2303, -1.1229, -0.1863],
                 [ 0.2303, -1.1229, -0.1863]]])
        >>> torch.stack((x, x), dim=2)
        tensor([[[ 0.3367,  0.3367],
                 [ 0.1288,  0.1288],
                 [ 0.2345,  0.2345]],
                [[ 0.2303,  0.2303],
                 [-1.1229, -1.1229],
                 [-0.1863, -0.1863]]])
        >>> torch.stack((x, x), dim=-1)
        tensor([[[ 0.3367,  0.3367],
                 [ 0.1288,  0.1288],
                 [ 0.2345,  0.2345]],
                [[ 0.2303,  0.2303],
                 [-1.1229, -1.1229],
                 [-0.1863, -0.1863]]])
    """
    ...
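# To contrast with torch.cat (see the seealso above): stack inserts a new
# dimension, while cat grows an existing one. A quick illustrative check:
#
#   >>> a = torch.zeros(2, 3)
#   >>> torch.stack((a, a)).size()       # new leading dimension
#   torch.Size([2, 2, 3])
#   >>> torch.cat((a, a)).size()         # dim 0 grows instead
#   torch.Size([4, 3])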
@overload
def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the standard deviation over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std(a, dim=1, keepdim=True)
        tensor([[1.0311],
                [0.7477],
                [1.2204],
                [0.9087]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the standard deviation over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std(a, dim=1, keepdim=True)
        tensor([[1.0311],
                [0.7477],
                [1.2204],
                [0.9087]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std(input: Tensor, unbiased: _bool = True) -> Tensor:
    r"""
    std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the standard deviation over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std(a, dim=1, keepdim=True)
        tensor([[1.0311],
                [0.7477],
                [1.2204],
                [0.9087]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the standard deviation over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std(a, dim=1, keepdim=True)
        tensor([[1.0311],
                [0.7477],
                [1.2204],
                [0.9087]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the standard deviation over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints): the dimension or dimensions to reduce.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std(a, dim=1, keepdim=True)
        tensor([[1.0311],
                [0.7477],
                [1.2204],
                [0.9087]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
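# A short sketch of the ``correction`` keyword described above (printouts
# rounded to four decimals, as in the default tensor repr):
#
#   >>> a = torch.tensor([1.0, 2.0, 3.0, 4.0])
#   >>> torch.std(a)                     # Bessel's correction, correction=1
#   tensor(1.2910)
#   >>> torch.std(a, correction=0)       # equivalent to the old unbiased=False
#   tensor(1.1180)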
@overload
def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the standard deviation and mean over the dimensions specified by
    :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
    ``None`` to reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (std, mean) containing the standard deviation and mean.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std_mean(a, dim=0, keepdim=True)
        (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the standard deviation and mean over the dimensions specified by
    :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
    ``None`` to reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (std, mean) containing the standard deviation and mean.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std_mean(a, dim=0, keepdim=True)
        (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]:
    r"""
    std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the standard deviation and mean over the dimensions specified by
    :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
    ``None`` to reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (std, mean) containing the standard deviation and mean.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std_mean(a, dim=0, keepdim=True)
        (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the standard deviation and mean over the dimensions specified by
    :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
    ``None`` to reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (std, mean) containing the standard deviation and mean.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std_mean(a, dim=0, keepdim=True)
        (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the standard deviation and mean over the dimensions specified by
    :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
    ``None`` to reduce over all dimensions.

    The standard deviation (:math:`\sigma`) is calculated as

    .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
               Previously this argument was called ``unbiased`` and was a boolean
               with ``True`` corresponding to ``correction=1`` and ``False`` being
               ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (std, mean) containing the standard deviation and mean.

    Example:

        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.std_mean(a, dim=0, keepdim=True)
        (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
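# Illustrative equivalence: std_mean fuses the two reductions of torch.std and
# torch.mean into one call (printouts rounded to four decimals):
#
#   >>> a = torch.tensor([1.0, 2.0, 3.0, 4.0])
#   >>> torch.std_mean(a)
#   (tensor(1.2910), tensor(2.5000))
#   >>> (torch.std(a), torch.mean(a))    # same values, two separate reductions
#   (tensor(1.2910), tensor(2.5000))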
@overload
def sub(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    sub(input, other, *, alpha=1, out=None) -> Tensor

    Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.

    .. math::
        \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

    Args:
        input (Tensor): the input tensor.
        other (Tensor or Number): the tensor or number to subtract from :attr:`input`.

    Keyword args:
        alpha (Number): the multiplier for :attr:`other`.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor((1, 2))
        >>> b = torch.tensor((0, 1))
        >>> torch.sub(a, b, alpha=2)
        tensor([1, 0])
    """
    ...
@overload
def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    sub(input, other, *, alpha=1, out=None) -> Tensor

    Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.

    .. math::
        \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

    Args:
        input (Tensor): the input tensor.
        other (Tensor or Number): the tensor or number to subtract from :attr:`input`.

    Keyword args:
        alpha (Number): the multiplier for :attr:`other`.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor((1, 2))
        >>> b = torch.tensor((0, 1))
        >>> torch.sub(a, b, alpha=2)
        tensor([1, 0])
    """
    ...
@overload
def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor:
    r"""
    sub(input, other, *, alpha=1, out=None) -> Tensor

    Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.

    .. math::
        \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i

    Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
    :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

    Args:
        input (Tensor): the input tensor.
        other (Tensor or Number): the tensor or number to subtract from :attr:`input`.

    Keyword args:
        alpha (Number): the multiplier for :attr:`other`.
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.tensor((1, 2))
        >>> b = torch.tensor((0, 1))
        >>> torch.sub(a, b, alpha=2)
        tensor([1, 0])
    """
    ...
@overload
def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
    r"""
    subtract(input, other, *, alpha=1, out=None) -> Tensor

    Alias for :func:`torch.sub`.
    """
    ...
@overload
def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
    r"""
    subtract(input, other, *, alpha=1, out=None) -> Tensor

    Alias for :func:`torch.sub`.
    """
    ...
@overload
def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
    r"""
    sum(input, *, dtype=None) -> Tensor

    Returns the sum of all elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.1133, -0.9567,  0.2958]])
        >>> torch.sum(a)
        tensor(-0.5475)

    .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the sum of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0569, -0.2475,  0.0737, -0.3429],
                [-0.2993,  0.9138,  0.9337, -1.6864],
                [ 0.1132,  0.7892, -0.1003,  0.5688],
                [ 0.3637, -0.9906, -0.4752, -1.5197]])
        >>> torch.sum(a, 1)
        tensor([-0.4598, -0.1381,  1.3708, -2.6217])
        >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
        >>> torch.sum(b, (2, 1))
        tensor([ 435., 1335., 2235., 3135.])
    """
    ...
@overload
def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    sum(input, *, dtype=None) -> Tensor

    Returns the sum of all elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.1133, -0.9567,  0.2958]])
        >>> torch.sum(a)
        tensor(-0.5475)

    .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the sum of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0569, -0.2475,  0.0737, -0.3429],
                [-0.2993,  0.9138,  0.9337, -1.6864],
                [ 0.1132,  0.7892, -0.1003,  0.5688],
                [ 0.3637, -0.9906, -0.4752, -1.5197]])
        >>> torch.sum(a, 1)
        tensor([-0.4598, -0.1381,  1.3708, -2.6217])
        >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
        >>> torch.sum(b, (2, 1))
        tensor([ 435., 1335., 2235., 3135.])
    """
    ...
@overload
def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
    r"""
    sum(input, *, dtype=None) -> Tensor

    Returns the sum of all elements in the :attr:`input` tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(1, 3)
        >>> a
        tensor([[ 0.1133, -0.9567,  0.2958]])
        >>> torch.sum(a)
        tensor(-0.5475)

    .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
       :noindex:

    Returns the sum of each row of the :attr:`input` tensor in the given
    dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
    reduce over all of them.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            If specified, the input tensor is cast to :attr:`dtype` before the operation
            is performed. This is useful for preventing data type overflows. Default: None.

    Example::

        >>> a = torch.randn(4, 4)
        >>> a
        tensor([[ 0.0569, -0.2475,  0.0737, -0.3429],
                [-0.2993,  0.9138,  0.9337, -1.6864],
                [ 0.1132,  0.7892, -0.1003,  0.5688],
                [ 0.3637, -0.9906, -0.4752, -1.5197]])
        >>> torch.sum(a, 1)
        tensor([-0.4598, -0.1381,  1.3708, -2.6217])
        >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
        >>> torch.sum(b, (2, 1))
        tensor([ 435., 1335., 2235., 3135.])
    """
    ...
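# A sketch of the ``dtype`` keyword guarding against overflow, as described in
# the docstring above (float16 saturates near 65504; values are illustrative):
#
#   >>> a = torch.full((10,), 60000.0, dtype=torch.float16)
#   >>> torch.sum(a)                         # overflows in float16
#   tensor(inf, dtype=torch.float16)
#   >>> torch.sum(a, dtype=torch.float32)    # accumulate in float32 instead
#   tensor(600000.)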
def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd:
    r"""
    svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)

    Computes the singular value decomposition of either a matrix or batch of
    matrices :attr:`input`. The singular value decomposition is represented as a
    namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
    where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
    and the conjugate transpose of `V` for complex inputs.
    If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
    batched with the same batch dimensions as :attr:`input`.

    If :attr:`some` is `True` (default), the method returns the reduced singular
    value decomposition. In this case, if the last two dimensions of :attr:`input` are
    `m` and `n`, then the returned `U` and `V` matrices will contain only
    `min(n, m)` orthonormal columns.

    If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
    zero-filled matrices of shape `(m, m)` and `(n, n)`
    respectively, and the same device as :attr:`input`. The argument :attr:`some`
    has no effect when :attr:`compute_uv` is `False`.

    Supports :attr:`input` of float, double, cfloat and cdouble data types.
    The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
    always be real-valued, even if :attr:`input` is complex.

    .. warning::

        :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
        and will be removed in a future PyTorch release.

        ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with

        .. code:: python

            U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
            V = Vh.mH

        ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with

        .. code:: python

            S = torch.linalg.svdvals(A)

    .. note:: Differences with :func:`torch.linalg.svd`:

              * :attr:`some` is the opposite of
                :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
                the default value for both is `True`, so the default behavior is
                effectively the opposite.
              * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
                `Vh`, that is, :math:`V^{\text{H}}`.
              * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
                tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
                empty tensors.

    .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
              then the singular values of each matrix in the batch are returned in descending order.

    .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.

    .. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
              and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
              can be arbitrary bases of the corresponding subspaces.

    .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
              (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
              on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
              and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.

    .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
              be represented as a column-major matrix (i.e. Fortran-contiguous).

    .. warning:: The gradients with respect to `U` and `V` will only be finite when the input has
                 neither zero nor repeated singular values.

    .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
                 `U` and `V` will be numerically unstable, as they depend on
                 :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
                 has small singular values, as these gradients also depend on `S^{-1}`.

    .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
                 as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
                 The same happens when :attr:`input` has repeated singular values, where one may multiply
                 the columns of the spanning subspace in `U` and `V` by a rotation matrix
                 and `the resulting vectors will span the same subspace`_.
                 Different platforms, like NumPy, or inputs on different device types,
                 may produce different `U` and `V` tensors.

    Args:
        input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
            batch dimensions consisting of `(m, n)` matrices.
        some (bool, optional): controls whether to compute the reduced or full decomposition, and
            consequently, the shape of returned `U` and `V`. Default: `True`.
        compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.

    Keyword args:
        out (tuple, optional): the output tuple of tensors

    Example::

        >>> a = torch.randn(5, 3)
        >>> a
        tensor([[ 0.2364, -0.7752,  0.6372],
                [ 1.7201,  0.7394, -0.0504],
                [-0.3371, -1.0584,  0.5296],
                [ 0.3550, -0.4022,  1.5569],
                [ 0.2445, -0.0158,  1.1414]])
        >>> u, s, v = torch.svd(a)
        >>> u
        tensor([[ 0.4027,  0.0287,  0.5434],
                [-0.1946,  0.8833,  0.3679],
                [ 0.4296, -0.2890,  0.5261],
                [ 0.6604,  0.2717, -0.2618],
                [ 0.4234,  0.2481, -0.4733]])
        >>> s
        tensor([2.3289, 2.0315, 0.7806])
        >>> v
        tensor([[-0.0199,  0.8766,  0.4809],
                [-0.5080,  0.4054, -0.7600],
                [ 0.8611,  0.2594, -0.4373]])
        >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
        tensor(8.6531e-07)
        >>> a_big = torch.randn(7, 5, 3)
        >>> u, s, v = torch.svd(a_big)
        >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
        tensor(2.6503e-06)

    .. _the resulting vectors will span the same subspace:
       https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
    """
    ...
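# A sketch of the migration recommended in the deprecation warning above,
# checking that the torch.linalg.svd replacement reconstructs the input (the
# error is tiny but nonzero, and varies with the random input):
#
#   >>> A = torch.randn(5, 3)
#   >>> U, S, Vh = torch.linalg.svd(A, full_matrices=False)
#   >>> torch.dist(A, U @ torch.diag(S) @ Vh) < 1e-5
#   tensor(True)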
def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor:
    r"""
    swapaxes(input, axis0, axis1) -> Tensor

    Alias for :func:`torch.transpose`.

    This function is equivalent to NumPy's swapaxes function.

    Examples::

        >>> x = torch.tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
        >>> x
        tensor([[[0, 1],
                 [2, 3]],
                [[4, 5],
                 [6, 7]]])
        >>> torch.swapaxes(x, 0, 1)
        tensor([[[0, 1],
                 [4, 5]],
                [[2, 3],
                 [6, 7]]])
        >>> torch.swapaxes(x, 0, 2)
        tensor([[[0, 4],
                 [2, 6]],
                [[1, 5],
                 [3, 7]]])
    """
    ...
def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor:
    r"""
    swapdims(input, dim0, dim1) -> Tensor

    Alias for :func:`torch.transpose`.

    This function is equivalent to NumPy's swapaxes function.

    Examples::

        >>> x = torch.tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
        >>> x
        tensor([[[0, 1],
                 [2, 3]],
                [[4, 5],
                 [6, 7]]])
        >>> torch.swapdims(x, 0, 1)
        tensor([[[0, 1],
                 [4, 5]],
                [[2, 3],
                 [6, 7]]])
        >>> torch.swapdims(x, 0, 2)
        tensor([[[0, 4],
                 [2, 6]],
                [[1, 5],
                 [3, 7]]])
    """
    ...
def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
def t(input: Tensor) -> Tensor:
    r"""
    t(input) -> Tensor

    Expects :attr:`input` to be a <= 2-D tensor and transposes dimensions 0
    and 1.

    0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
    is equivalent to ``transpose(input, 0, 1)``.

    Args:
        input (Tensor): the input tensor.

    Example::

        >>> x = torch.randn(())
        >>> x
        tensor(0.1995)
        >>> torch.t(x)
        tensor(0.1995)
        >>> x = torch.randn(3)
        >>> x
        tensor([ 2.4320, -0.4608,  0.7702])
        >>> torch.t(x)
        tensor([ 2.4320, -0.4608,  0.7702])
        >>> x = torch.randn(2, 3)
        >>> x
        tensor([[ 0.4875,  0.9158, -0.5872],
                [ 0.3938, -0.6929,  0.6932]])
        >>> torch.t(x)
        tensor([[ 0.4875,  0.3938],
                [ 0.9158, -0.6929],
                [-0.5872,  0.6932]])

    See also :func:`torch.transpose`.
    """
    ...
def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.t`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
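# A minimal sketch of the non-aliasing behavior (illustrative values):
#
#   >>> x = torch.tensor([[1., 2.], [3., 4.]])
#   >>> y = torch.t_copy(x)              # same values as torch.t(x)
#   >>> x[0, 1] = 9.                     # y owns its storage, so it keeps 2.
#   >>> y[1, 0]
#   tensor(2.)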
  18373. def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
  18374. r"""
  18375. take(input, index) -> Tensor
  18376. Returns a new tensor with the elements of :attr:`input` at the given indices.
  18377. The input tensor is treated as if it were viewed as a 1-D tensor. The result
  18378. takes the same shape as the indices.
  18379. Args:
  18380. input (Tensor): the input tensor.
  18381. index (LongTensor): the indices into tensor
  18382. Example::
  18383. >>> src = torch.tensor([[4, 3, 5],
  18384. ... [6, 7, 8]])
  18385. >>> torch.take(src, torch.tensor([0, 2, 5]))
  18386. tensor([ 4, 5, 8])
  18387. """
  18388. ...
def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    take_along_dim(input, indices, dim=None, *, out=None) -> Tensor

    Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.

    If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d.

    Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
    are designed to work with this function. See the examples below.

    .. note::
        This function is similar to NumPy's `take_along_axis`.
        See also :func:`torch.gather`.

    Args:
        input (Tensor): the input tensor.
        indices (tensor): the indices into :attr:`input`. Must have long dtype.
        dim (int, optional): dimension to select along.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
        >>> max_idx = torch.argmax(t)
        >>> torch.take_along_dim(t, max_idx)
        tensor([60])
        >>> sorted_idx = torch.argsort(t, dim=1)
        >>> torch.take_along_dim(t, sorted_idx, dim=1)
        tensor([[10, 20, 30],
                [40, 50, 60]])
    """
    ...
def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    tan(input, *, out=None) -> Tensor

    Returns a new tensor with the tangent of the elements of :attr:`input`.

    .. math::
        \text{out}_{i} = \tan(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([-1.2027, -1.7687,  0.4412, -1.3856])
        >>> torch.tan(a)
        tensor([-2.5930,  4.9859,  0.4722, -5.3366])
    """
    ...
def tan_(input: Tensor) -> Tensor: ...
def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    tanh(input, *, out=None) -> Tensor

    Returns a new tensor with the hyperbolic tangent of the elements
    of :attr:`input`.

    .. math::
        \text{out}_{i} = \tanh(\text{input}_{i})

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 0.8986, -0.7279,  1.1745,  0.2611])
        >>> torch.tanh(a)
        tensor([ 0.7156, -0.6218,  0.8257,  0.2553])
    """
    ...
def tanh_(input: Tensor) -> Tensor: ...
def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
    r"""
    tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor

    Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.

    .. warning::
        When working with tensors prefer using :func:`torch.Tensor.clone`,
        :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
        readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
        ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
        is equivalent to ``t.clone().detach().requires_grad_(True)``.

    .. seealso::
        :func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
        :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.

    Args:
        data (array_like): Initial data for the tensor. Can be a list, tuple,
            NumPy ``ndarray``, scalar, and other types.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, infers data type from :attr:`data`.
        device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
            then the device of data is used. If None and data is not a tensor then
            the result tensor is constructed on the current device.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
        tensor([[ 0.1000,  1.2000],
                [ 2.2000,  3.1000],
                [ 4.9000,  5.2000]])

        >>> torch.tensor([0, 1])  # Type inference on data
        tensor([ 0,  1])

        >>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
        ...              dtype=torch.float64,
        ...              device=torch.device('cuda:0'))  # creates a double tensor on a CUDA device
        tensor([[ 0.1111,  0.2222,  0.3333]], dtype=torch.float64, device='cuda:0')

        >>> torch.tensor(3.14159)  # Create a zero-dimensional (scalar) tensor
        tensor(3.1416)

        >>> torch.tensor([])  # Create an empty tensor (of size (0,))
        tensor([])
    """
    ...
@overload
def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    tensor_split(input, indices_or_sections, dim=0) -> List of Tensors

    Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
    along dimension :attr:`dim` according to the indices or number of sections specified
    by :attr:`indices_or_sections`. This function is based on NumPy's
    :func:`numpy.array_split`.

    Args:
        input (Tensor): the tensor to split
        indices_or_sections (Tensor, int or list or tuple of ints):
            If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
            with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
            If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
            section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
            is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
            sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
            have size :code:`int(input.size(dim) / n)`.

            If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
            tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
            in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
            would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.

            If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
            long tensor on the CPU.
        dim (int, optional): dimension along which to split the tensor. Default: ``0``

    Example::

        >>> x = torch.arange(8)
        >>> torch.tensor_split(x, 3)
        (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))

        >>> x = torch.arange(7)
        >>> torch.tensor_split(x, 3)
        (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
        >>> torch.tensor_split(x, (1, 6))
        (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))

        >>> x = torch.arange(14).reshape(2, 7)
        >>> x
        tensor([[ 0,  1,  2,  3,  4,  5,  6],
                [ 7,  8,  9, 10, 11, 12, 13]])
        >>> torch.tensor_split(x, 3, dim=1)
        (tensor([[0, 1, 2],
                 [7, 8, 9]]),
         tensor([[ 3,  4],
                 [10, 11]]),
         tensor([[ 5,  6],
                 [12, 13]]))
        >>> torch.tensor_split(x, (1, 6), dim=1)
        (tensor([[0],
                 [7]]),
         tensor([[ 1,  2,  3,  4,  5],
                 [ 8,  9, 10, 11, 12]]),
         tensor([[ 6],
                 [13]]))
    """
    ...
@overload
def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    tensor_split(input, indices_or_sections, dim=0) -> List of Tensors

    Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
    along dimension :attr:`dim` according to the indices or number of sections specified
    by :attr:`indices_or_sections`. This function is based on NumPy's
    :func:`numpy.array_split`.

    Args:
        input (Tensor): the tensor to split
        indices_or_sections (Tensor, int or list or tuple of ints):
            If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
            with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
            If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
            section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
            is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
            sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
            have size :code:`int(input.size(dim) / n)`.

            If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
            tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
            in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
            would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.

            If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
            long tensor on the CPU.
        dim (int, optional): dimension along which to split the tensor. Default: ``0``

    Example::

        >>> x = torch.arange(8)
        >>> torch.tensor_split(x, 3)
        (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))

        >>> x = torch.arange(7)
        >>> torch.tensor_split(x, 3)
        (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
        >>> torch.tensor_split(x, (1, 6))
        (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))

        >>> x = torch.arange(14).reshape(2, 7)
        >>> x
        tensor([[ 0,  1,  2,  3,  4,  5,  6],
                [ 7,  8,  9, 10, 11, 12, 13]])
        >>> torch.tensor_split(x, 3, dim=1)
        (tensor([[0, 1, 2],
                 [7, 8, 9]]),
         tensor([[ 3,  4],
                 [10, 11]]),
         tensor([[ 5,  6],
                 [12, 13]]))
        >>> torch.tensor_split(x, (1, 6), dim=1)
        (tensor([[0],
                 [7]]),
         tensor([[ 1,  2,  3,  4,  5],
                 [ 8,  9, 10, 11, 12]]),
         tensor([[ 6],
                 [13]]))
    """
    ...
@overload
def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    tensor_split(input, indices_or_sections, dim=0) -> List of Tensors

    Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
    along dimension :attr:`dim` according to the indices or number of sections specified
    by :attr:`indices_or_sections`. This function is based on NumPy's
    :func:`numpy.array_split`.

    Args:
        input (Tensor): the tensor to split
        indices_or_sections (Tensor, int or list or tuple of ints):
            If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
            with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
            If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
            section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
            is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
            sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
            have size :code:`int(input.size(dim) / n)`.

            If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
            tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
            in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
            would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.

            If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
            long tensor on the CPU.
        dim (int, optional): dimension along which to split the tensor. Default: ``0``

    Example::

        >>> x = torch.arange(8)
        >>> torch.tensor_split(x, 3)
        (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))

        >>> x = torch.arange(7)
        >>> torch.tensor_split(x, 3)
        (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
        >>> torch.tensor_split(x, (1, 6))
        (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))

        >>> x = torch.arange(14).reshape(2, 7)
        >>> x
        tensor([[ 0,  1,  2,  3,  4,  5,  6],
                [ 7,  8,  9, 10, 11, 12, 13]])
        >>> torch.tensor_split(x, 3, dim=1)
        (tensor([[0, 1, 2],
                 [7, 8, 9]]),
         tensor([[ 3,  4],
                 [10, 11]]),
         tensor([[ 5,  6],
                 [12, 13]]))
        >>> torch.tensor_split(x, (1, 6), dim=1)
        (tensor([[0],
                 [7]]),
         tensor([[ 1,  2,  3,  4,  5],
                 [ 8,  9, 10, 11, 12]]),
         tensor([[ 6],
                 [13]]))
    """
    ...
def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ...
def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor:
    r"""
    tile(input, dims) -> Tensor

    Constructs a tensor by repeating the elements of :attr:`input`.
    The :attr:`dims` argument specifies the number of repetitions
    in each dimension.

    If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
    ones are prepended to :attr:`dims` until all dimensions are specified.
    For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
    is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).

    Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
    specifies, then :attr:`input` is treated as if it were unsqueezed at
    dimension zero until it has as many dimensions as :attr:`dims` specifies.
    For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
    is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
    shape (1, 1, 4, 2).

    .. note::
        This function is similar to NumPy's tile function.

    Args:
        input (Tensor): the tensor whose elements to repeat.
        dims (tuple): the number of repetitions per dimension.

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> x.tile((2,))
        tensor([1, 2, 3, 1, 2, 3])
        >>> y = torch.tensor([[1, 2], [3, 4]])
        >>> torch.tile(y, (2, 2))
        tensor([[1, 2, 1, 2],
                [3, 4, 3, 4],
                [1, 2, 1, 2],
                [3, 4, 3, 4]])
    """
    ...
def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk:
    r"""
    topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)

    Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
    a given dimension.

    If :attr:`dim` is not given, the last dimension of the `input` is chosen.

    If :attr:`largest` is ``False`` then the `k` smallest elements are returned.

    A namedtuple of `(values, indices)` is returned with the `values` and
    `indices` of the largest `k` elements of each row of the `input` tensor in the
    given dimension `dim`.

    If the boolean option :attr:`sorted` is ``True``, the returned `k` elements
    are themselves sorted.

    Args:
        input (Tensor): the input tensor.
        k (int): the k in "top-k"
        dim (int, optional): the dimension to sort along
        largest (bool, optional): controls whether to return largest or
            smallest elements
        sorted (bool, optional): controls whether to return the elements
            in sorted order

    Keyword args:
        out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
            optionally given to be used as output buffers

    Example::

        >>> x = torch.arange(1., 6.)
        >>> x
        tensor([ 1.,  2.,  3.,  4.,  5.])
        >>> torch.topk(x, 3)
        torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
    """
    ...
def trace(input: Tensor) -> Tensor:
    r"""
    trace(input) -> Tensor

    Returns the sum of the elements of the diagonal of the input 2-D matrix.

    Example::

        >>> x = torch.arange(1., 10.).view(3, 3)
        >>> x
        tensor([[ 1.,  2.,  3.],
                [ 4.,  5.,  6.],
                [ 7.,  8.,  9.]])
        >>> torch.trace(x)
        tensor(15.)
    """
    ...
@overload
def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor:
    r"""
    transpose(input, dim0, dim1) -> Tensor

    Returns a tensor that is a transposed version of :attr:`input`.
    The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.

    If :attr:`input` is a strided tensor then the resulting :attr:`out`
    tensor shares its underlying storage with the :attr:`input` tensor, so
    changing the content of one would change the content of the other.

    If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
    resulting :attr:`out` tensor *does not* share the underlying storage
    with the :attr:`input` tensor.

    If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
    layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
    :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
    both be sparse dimensions. The batch dimensions of a sparse tensor are the
    dimensions preceding the sparse dimensions.

    .. note::
        Transpositions which interchange the sparse dimensions of a `SparseCSR`
        or `SparseCSC` layout tensor will result in the layout changing between
        the two options. Transposition of the sparse dimensions of a `SparseBSR`
        or `SparseBSC` layout tensor will likewise generate a result with the
        opposite layout.

    Args:
        input (Tensor): the input tensor.
        dim0 (int): the first dimension to be transposed
        dim1 (int): the second dimension to be transposed

    Example::

        >>> x = torch.randn(2, 3)
        >>> x
        tensor([[ 1.0028, -0.9893,  0.5809],
                [-0.1669,  0.7299,  0.4942]])
        >>> torch.transpose(x, 0, 1)
        tensor([[ 1.0028, -0.1669],
                [-0.9893,  0.7299],
                [ 0.5809,  0.4942]])

    See also :func:`torch.t`.
    """
    ...
@overload
def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor:
    r"""
    transpose(input, dim0, dim1) -> Tensor

    Returns a tensor that is a transposed version of :attr:`input`.
    The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.

    If :attr:`input` is a strided tensor then the resulting :attr:`out`
    tensor shares its underlying storage with the :attr:`input` tensor, so
    changing the content of one would change the content of the other.

    If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
    resulting :attr:`out` tensor *does not* share the underlying storage
    with the :attr:`input` tensor.

    If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
    layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
    :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
    both be sparse dimensions. The batch dimensions of a sparse tensor are the
    dimensions preceding the sparse dimensions.

    .. note::
        Transpositions which interchange the sparse dimensions of a `SparseCSR`
        or `SparseCSC` layout tensor will result in the layout changing between
        the two options. Transposition of the sparse dimensions of a `SparseBSR`
        or `SparseBSC` layout tensor will likewise generate a result with the
        opposite layout.

    Args:
        input (Tensor): the input tensor.
        dim0 (int): the first dimension to be transposed
        dim1 (int): the second dimension to be transposed

    Example::

        >>> x = torch.randn(2, 3)
        >>> x
        tensor([[ 1.0028, -0.9893,  0.5809],
                [-0.1669,  0.7299,  0.4942]])
        >>> torch.transpose(x, 0, 1)
        tensor([[ 1.0028, -0.1669],
                [-0.9893,  0.7299],
                [ 0.5809,  0.4942]])

    See also :func:`torch.t`.
    """
    ...
def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.transpose`, but all output tensors
    are freshly created instead of aliasing the input.
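
    A minimal illustrative sketch (not from the upstream docs): unlike
    :func:`torch.transpose`, writes to the result do not touch :attr:`input`.

    Example::

        >>> x = torch.arange(6).reshape(2, 3)
        >>> y = torch.transpose_copy(x, 0, 1)
        >>> y.shape
        torch.Size([3, 2])
        >>> y[0, 0] = -1
        >>> x[0, 0]  # the input is unaffected by writes to the copy
        tensor(0)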
  18822. """
  18823. ...
@overload
def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
    r"""
    trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

    Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
    :attr:`dim`. By default the spacing between elements is assumed to be 1, but
    :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
    used to specify arbitrary spacing along :attr:`dim`.

    Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
    the default computation is

    .. math::
        \begin{aligned}
            \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
        \end{aligned}

    When :attr:`dx` is specified the computation becomes

    .. math::
        \begin{aligned}
            \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
        \end{aligned}

    effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
    assuming :attr:`x` is also a one-dimensional tensor with
    elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes

    .. math::
        \begin{aligned}
            \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
        \end{aligned}

    When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
    The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
    and :attr:`y`, the function computes the difference between consecutive elements along
    dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
    the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1.
    After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
    See the examples below for details.

    .. note::
        The trapezoidal rule is a technique for approximating the definite integral of a function
        by averaging its left and right Riemann sums. The approximation becomes more accurate as
        the resolution of the partition increases.

    Arguments:
        y (Tensor): Values to use when computing the trapezoidal rule.
        x (Tensor): If specified, defines spacing between values as specified above.

    Keyword arguments:
        dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
            is specified then this defaults to 1. Effectively multiplies the result by its value.
        dim (int): The dimension along which to compute the trapezoidal rule.
            The last (inner-most) dimension by default.

    Examples::

        >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
        >>> y = torch.tensor([1, 5, 10])
        >>> torch.trapezoid(y)
        tensor(10.5)

        >>> # Computes the same trapezoidal rule directly to verify
        >>> (1 + 10 + 10) / 2
        10.5

        >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
        >>> # NOTE: the result is the same as before, but multiplied by 2
        >>> torch.trapezoid(y, dx=2)
        tensor(21.0)

        >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.trapezoid(y, x)
        tensor(28.5)

        >>> # Computes the same trapezoidal rule directly to verify
        >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
        28.5

        >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
        >>> y = torch.arange(9).reshape(3, 3)
        >>> y
        tensor([[0, 1, 2],
                [3, 4, 5],
                [6, 7, 8]])
        >>> torch.trapezoid(y)
        tensor([ 2.,  8., 14.])

        >>> # Computes the trapezoidal rule for each column of the matrix
        >>> torch.trapezoid(y, dim=0)
        tensor([ 6.,  8., 10.])

        >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with the same arbitrary spacing
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.trapezoid(y, x)
        tensor([5., 5., 5.])

        >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with different arbitrary spacing per row
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
        >>> torch.trapezoid(y, x)
        tensor([2., 4., 6.])
    """
    ...
@overload
def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor:
    r"""
    trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

    Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
    :attr:`dim`. By default the spacing between elements is assumed to be 1, but
    :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
    used to specify arbitrary spacing along :attr:`dim`.

    Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
    the default computation is

    .. math::
        \begin{aligned}
            \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
        \end{aligned}

    When :attr:`dx` is specified the computation becomes

    .. math::
        \begin{aligned}
            \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
        \end{aligned}

    effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
    assuming :attr:`x` is also a one-dimensional tensor with
    elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes

    .. math::
        \begin{aligned}
            \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
        \end{aligned}

    When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
    The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
    and :attr:`y`, the function computes the difference between consecutive elements along
    dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
    the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1.
    After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
    See the examples below for details.

    .. note::
        The trapezoidal rule is a technique for approximating the definite integral of a function
        by averaging its left and right Riemann sums. The approximation becomes more accurate as
        the resolution of the partition increases.

    Arguments:
        y (Tensor): Values to use when computing the trapezoidal rule.
        x (Tensor): If specified, defines spacing between values as specified above.

    Keyword arguments:
        dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
            is specified then this defaults to 1. Effectively multiplies the result by its value.
        dim (int): The dimension along which to compute the trapezoidal rule.
            The last (inner-most) dimension by default.

    Examples::

        >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
        >>> y = torch.tensor([1, 5, 10])
        >>> torch.trapezoid(y)
        tensor(10.5)

        >>> # Computes the same trapezoidal rule directly to verify
        >>> (1 + 10 + 10) / 2
        10.5

        >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
        >>> # NOTE: the result is the same as before, but multiplied by 2
        >>> torch.trapezoid(y, dx=2)
        tensor(21.0)

        >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.trapezoid(y, x)
        tensor(28.5)

        >>> # Computes the same trapezoidal rule directly to verify
        >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
        28.5

        >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
        >>> y = torch.arange(9).reshape(3, 3)
        >>> y
        tensor([[0, 1, 2],
                [3, 4, 5],
                [6, 7, 8]])
        >>> torch.trapezoid(y)
        tensor([ 2.,  8., 14.])

        >>> # Computes the trapezoidal rule for each column of the matrix
        >>> torch.trapezoid(y, dim=0)
        tensor([ 6.,  8., 10.])

        >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with the same arbitrary spacing
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([1, 3, 6])
        >>> torch.trapezoid(y, x)
        tensor([5., 5., 5.])

        >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
        >>> # with different arbitrary spacing per row
        >>> y = torch.ones(3, 3)
        >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
        >>> torch.trapezoid(y, x)
        tensor([2., 4., 6.])
    """
    ...
@overload
def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor:
    r"""
    trapz(y, x, *, dim=-1) -> Tensor

    Alias for :func:`torch.trapezoid`.
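
    A brief illustrative doctest (added here for convenience; not from the
    upstream docs), showing that the alias matches :func:`torch.trapezoid`:

    Example::

        >>> y = torch.tensor([1., 5., 10.])
        >>> torch.trapz(y)
        tensor(10.5000)
        >>> torch.trapezoid(y)
        tensor(10.5000)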
  19005. """
  19006. ...
@overload
def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
    r"""
    trapz(y, x, *, dim=-1) -> Tensor

    Alias for :func:`torch.trapezoid`.
    """
    ...
def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve:
    r"""
    triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)

    Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
    and multiple right-hand sides :math:`b`.

    In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
    (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.

    `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
    batches of 2D matrices. If the inputs are batches, then returns
    batched outputs `X`.

    If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
    :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
    the result may contain `NaN` s.

    Supports input of float, double, cfloat and cdouble data types.

    .. warning::

        :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
        and will be removed in a future PyTorch release.
        :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
        copy of one of the inputs.

        ``X = torch.triangular_solve(B, A).solution`` should be replaced with

        .. code:: python

            X = torch.linalg.solve_triangular(A, B)

    Args:
        b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
            :math:`*` is zero or more batch dimensions
        A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
            where :math:`*` is zero or more batch dimensions
        upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
        transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
            and `op(A) = A` if it is ``False``. Default: ``False``.
        unitriangular (bool, optional): whether :math:`A` is unit triangular.
            If True, the diagonal elements of :math:`A` are assumed to be
            1 and not referenced from :math:`A`. Default: ``False``.

    Keyword args:
        out ((Tensor, Tensor), optional): tuple of two tensors to write
            the output to. Ignored if `None`. Default: `None`.

    Returns:
        A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
        is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
        (or whatever variant of the system of equations, depending on the keyword arguments.)

    Examples::

        >>> A = torch.randn(2, 2).triu()
        >>> A
        tensor([[ 1.1527, -1.0753],
                [ 0.0000,  0.7986]])
        >>> b = torch.randn(2, 3)
        >>> b
        tensor([[-0.0210,  2.3513, -1.5492],
                [ 1.5429,  0.7403, -1.0243]])
        >>> torch.triangular_solve(b, A)
        torch.return_types.triangular_solve(
        solution=tensor([[ 1.7841,  2.9046, -2.5405],
                [ 1.9320,  0.9270, -1.2826]]),
        cloned_coefficient=tensor([[ 1.1527, -1.0753],
                [ 0.0000,  0.7986]]))
    """
    ...
def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    tril(input, diagonal=0, *, out=None) -> Tensor

    Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
    :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.

    The lower triangular part of the matrix is defined as the elements on and
    below the diagonal.

    The argument :attr:`diagonal` controls which diagonal to consider. If
    :attr:`diagonal` = 0, all elements on and below the main diagonal are
    retained. A positive value includes just as many diagonals above the main
    diagonal, and similarly a negative value excludes just as many diagonals below
    the main diagonal. The main diagonal is the set of indices
    :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
    :math:`d_{1}, d_{2}` are the dimensions of the matrix.

    Args:
        input (Tensor): the input tensor.
        diagonal (int, optional): the diagonal to consider

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(3, 3)
        >>> a
        tensor([[-1.0813, -0.8619,  0.7105],
                [ 0.0935,  0.1380,  2.2112],
                [-0.3409, -0.9828,  0.0289]])
        >>> torch.tril(a)
        tensor([[-1.0813,  0.0000,  0.0000],
                [ 0.0935,  0.1380,  0.0000],
                [-0.3409, -0.9828,  0.0289]])

        >>> b = torch.randn(4, 6)
        >>> b
        tensor([[ 1.2219,  0.5653, -0.2521, -0.2345,  1.2544,  0.3461],
                [ 0.4785, -0.4477,  0.6049,  0.6368,  0.8775,  0.7145],
                [ 1.1502,  3.2716, -1.1243, -0.5413,  0.3615,  0.6864],
                [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024,  0.0978]])
        >>> torch.tril(b, diagonal=1)
        tensor([[ 1.2219,  0.5653,  0.0000,  0.0000,  0.0000,  0.0000],
                [ 0.4785, -0.4477,  0.6049,  0.0000,  0.0000,  0.0000],
                [ 1.1502,  3.2716, -1.1243, -0.5413,  0.0000,  0.0000],
                [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024,  0.0000]])
        >>> torch.tril(b, diagonal=-1)
        tensor([[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                [ 0.4785,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                [ 1.1502,  3.2716,  0.0000,  0.0000,  0.0000,  0.0000],
                [-0.0614, -0.7344, -1.3164,  0.0000,  0.0000,  0.0000]])
    """
    ...
def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor

    Returns the indices of the lower triangular part of a :attr:`row`-by-
    :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
    coordinates of all indices and the second row contains column coordinates.
    Indices are ordered based on rows and then columns.

    The lower triangular part of the matrix is defined as the elements on and
    below the diagonal.

    The argument :attr:`offset` controls which diagonal to consider. If
    :attr:`offset` = 0, all elements on and below the main diagonal are
    retained. A positive value includes just as many diagonals above the main
    diagonal, and similarly a negative value excludes just as many diagonals below
    the main diagonal. The main diagonal is the set of indices
    :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
    where :math:`d_{1}, d_{2}` are the dimensions of the matrix.

    .. note::
        When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
        prevent overflow during calculation.

    Args:
        row (``int``): number of rows in the 2-D matrix.
        col (``int``): number of columns in the 2-D matrix.
        offset (``int``): diagonal offset from the main diagonal.
            Default: if not provided, 0.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, ``torch.long``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.

    Example::

        >>> a = torch.tril_indices(3, 3)
        >>> a
        tensor([[0, 1, 1, 2, 2, 2],
                [0, 0, 1, 0, 1, 2]])

        >>> a = torch.tril_indices(4, 3, -1)
        >>> a
        tensor([[1, 2, 2, 3, 3, 3],
                [0, 0, 1, 0, 1, 2]])

        >>> a = torch.tril_indices(4, 3, 1)
        >>> a
        tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
                [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
    """
    ...
def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ...
def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    triu(input, diagonal=0, *, out=None) -> Tensor

    Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
    :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.

    The upper triangular part of the matrix is defined as the elements on and
    above the diagonal.

    The argument :attr:`diagonal` controls which diagonal to consider. If
    :attr:`diagonal` = 0, all elements on and above the main diagonal are
    retained. A positive value excludes just as many diagonals above the main
    diagonal, and similarly a negative value includes just as many diagonals below
    the main diagonal. The main diagonal is the set of indices
    :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
    :math:`d_{1}, d_{2}` are the dimensions of the matrix.

    Args:
        input (Tensor): the input tensor.
        diagonal (int, optional): the diagonal to consider

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(3, 3)
        >>> a
        tensor([[ 0.2309,  0.5207,  2.0049],
                [ 0.2072, -1.0680,  0.6602],
                [ 0.3480, -0.5211, -0.4573]])
        >>> torch.triu(a)
        tensor([[ 0.2309,  0.5207,  2.0049],
                [ 0.0000, -1.0680,  0.6602],
                [ 0.0000,  0.0000, -0.4573]])
        >>> torch.triu(a, diagonal=1)
        tensor([[ 0.0000,  0.5207,  2.0049],
                [ 0.0000,  0.0000,  0.6602],
                [ 0.0000,  0.0000,  0.0000]])
        >>> torch.triu(a, diagonal=-1)
        tensor([[ 0.2309,  0.5207,  2.0049],
                [ 0.2072, -1.0680,  0.6602],
                [ 0.0000, -0.5211, -0.4573]])

        >>> b = torch.randn(4, 6)
        >>> b
        tensor([[ 0.5876, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
                [-0.2447,  0.9556, -1.2919,  1.3378, -0.1768, -1.0857],
                [ 0.4333,  0.3146,  0.6576, -1.0432,  0.9348, -0.4410],
                [-0.9888,  1.0679, -1.3337, -1.6556,  0.4798,  0.2830]])
        >>> torch.triu(b, diagonal=1)
        tensor([[ 0.0000, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
                [ 0.0000,  0.0000, -1.2919,  1.3378, -0.1768, -1.0857],
                [ 0.0000,  0.0000,  0.0000, -1.0432,  0.9348, -0.4410],
                [ 0.0000,  0.0000,  0.0000,  0.0000,  0.4798,  0.2830]])
        >>> torch.triu(b, diagonal=-1)
        tensor([[ 0.5876, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
                [-0.2447,  0.9556, -1.2919,  1.3378, -0.1768, -1.0857],
                [ 0.0000,  0.3146,  0.6576, -1.0432,  0.9348, -0.4410],
                [ 0.0000,  0.0000, -1.3337, -1.6556,  0.4798,  0.2830]])
    """
    ...
def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor

    Returns the indices of the upper triangular part of a :attr:`row` by
    :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
    coordinates of all indices and the second row contains column coordinates.
    Indices are ordered based on rows and then columns.

    The upper triangular part of the matrix is defined as the elements on and
    above the diagonal.

    The argument :attr:`offset` controls which diagonal to consider. If
    :attr:`offset` = 0, all elements on and above the main diagonal are
    retained. A positive value excludes just as many diagonals above the main
    diagonal, and similarly a negative value includes just as many diagonals below
    the main diagonal. The main diagonal is the set of indices
    :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
    where :math:`d_{1}, d_{2}` are the dimensions of the matrix.

    .. note::
        When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
        prevent overflow during calculation.

    Args:
        row (``int``): number of rows in the 2-D matrix.
        col (``int``): number of columns in the 2-D matrix.
        offset (``int``): diagonal offset from the main diagonal.
            Default: if not provided, 0.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, ``torch.long``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.

    Example::

        >>> a = torch.triu_indices(3, 3)
        >>> a
        tensor([[0, 0, 0, 1, 1, 2],
                [0, 1, 2, 1, 2, 2]])

        >>> a = torch.triu_indices(4, 3, -1)
        >>> a
        tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
                [0, 1, 2, 0, 1, 2, 1, 2, 2]])

        >>> a = torch.triu_indices(4, 3, 1)
        >>> a
        tensor([[0, 0, 1],
                [1, 2, 2]])
    """
    ...
def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    true_divide(dividend, divisor, *, out) -> Tensor

    Alias for :func:`torch.div` with ``rounding_mode=None``.
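
    A brief illustrative doctest (added here; not from the upstream docs):
    like true division in Python, integer inputs produce a floating result.

    Example::

        >>> torch.true_divide(torch.tensor([3, 2]), 2)
        tensor([1.5000, 1.0000])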
  19272. """
  19273. ...
def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    trunc(input, *, out=None) -> Tensor

    Returns a new tensor with the truncated integer values of
    the elements of :attr:`input`.

    For integer inputs, follows the array-api convention of returning a
    copy of the input tensor.

    Args:
        input (Tensor): the input tensor.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::

        >>> a = torch.randn(4)
        >>> a
        tensor([ 3.4742,  0.5466, -0.8008, -0.9079])
        >>> torch.trunc(a)
        tensor([ 3.,  0., -0., -0.])
    """
    ...
def trunc_(input: Tensor) -> Tensor: ...
@overload
def unbind(input: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    unbind(input, dim=0) -> seq

    Removes a tensor dimension.

    Returns a tuple of all slices along a given dimension, already without it.

    Arguments:
        input (Tensor): the tensor to unbind
        dim (int): dimension to remove

    Example::

        >>> torch.unbind(torch.tensor([[1, 2, 3],
        ...                            [4, 5, 6],
        ...                            [7, 8, 9]]))
        (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
    """
    ...
@overload
def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]:
    r"""
    unbind(input, dim=0) -> seq

    Removes a tensor dimension.

    Returns a tuple of all slices along a given dimension, already without it.

    Arguments:
        input (Tensor): the tensor to unbind
        dim (int): dimension to remove

    Example::

        >>> torch.unbind(torch.tensor([[1, 2, 3],
        ...                            [4, 5, 6],
        ...                            [7, 8, 9]]))
        (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
    """
    ...
def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
    r"""
    Performs the same operation as :func:`torch.unbind`, but all output tensors
    are freshly created instead of aliasing the input.
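
    A minimal illustrative sketch (not from the upstream docs; the ``None``
    return annotation above covers the ``out=`` form, while calling without
    ``out`` yields a tuple of copies at runtime):

    Example::

        >>> x = torch.tensor([[1, 2], [3, 4]])
        >>> a, b = torch.unbind_copy(x)
        >>> a[0] = 9
        >>> x[0, 0]  # the original tensor is unchanged
        tensor(1)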
  19330. """
  19331. ...
@overload
def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor:
    r"""
    unflatten(input, dim, sizes) -> Tensor

    Expands a dimension of the input tensor over multiple dimensions.

    .. seealso::

        :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.

    Args:
        input (Tensor): the input tensor.
        dim (int): Dimension to be unflattened, specified as an index into
            ``input.shape``.
        sizes (Tuple[int]): New shape of the unflattened dimension.
            One of its elements can be `-1` in which case the corresponding output
            dimension is inferred. Otherwise, the product of ``sizes`` *must*
            equal ``input.shape[dim]``.

    Returns:
        A view of input with the specified dimension unflattened.

    Examples::

        >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
        torch.Size([3, 2, 2, 1])
        >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
        torch.Size([3, 2, 2, 1])
        >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
        torch.Size([5, 2, 2, 3, 1, 1, 3])
    """
    ...
@overload
def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor:
    r"""
    unflatten(input, dim, sizes) -> Tensor

    Expands a dimension of the input tensor over multiple dimensions.

    .. seealso::

        :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.

    Args:
        input (Tensor): the input tensor.
        dim (int): Dimension to be unflattened, specified as an index into
            ``input.shape``.
        sizes (Tuple[int]): New shape of the unflattened dimension.
            One of its elements can be `-1` in which case the corresponding output
            dimension is inferred. Otherwise, the product of ``sizes`` *must*
            equal ``input.shape[dim]``.

    Returns:
        A view of input with the specified dimension unflattened.

    Examples::

        >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
        torch.Size([3, 2, 2, 1])
        >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
        torch.Size([3, 2, 2, 1])
        >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
        torch.Size([5, 2, 2, 3, 1, 1, 3])
    """
    ...
def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.unfold`, but all output tensors
    are freshly created instead of aliasing the input.
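
    A minimal illustrative sketch (not from the upstream docs): sliding
    windows of size 2 with step 1 over a 1-D tensor, materialized as a copy.

    Example::

        >>> x = torch.arange(5.)
        >>> torch.unfold_copy(x, 0, 2, 1)
        tensor([[0., 1.],
                [1., 2.],
                [2., 3.],
                [3., 4.]])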
  19388. """
  19389. ...
  19390. def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    unsafe_chunk(input, chunks, dim=0) -> List of Tensors

    Works like :func:`torch.chunk` but without enforcing the autograd restrictions
    on inplace modification of the outputs.

    .. warning::
        This function is safe to use as long as only the input, or only the outputs
        are modified inplace after calling this function. It is the user's
        responsibility to ensure that is the case. If both the input and one or more
        of the outputs are modified inplace, gradients computed by autograd will be
        silently incorrect.
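
    A minimal illustrative sketch (not from the upstream docs); the call
    mirrors :func:`torch.chunk`:

    Example::

        >>> x = torch.arange(6.)
        >>> torch.unsafe_chunk(x, 3)
        (tensor([0., 1.]), tensor([2., 3.]), tensor([4., 5.]))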
  19402. """
  19403. ...
def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
    r"""
    unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors

    Works like :func:`torch.split` but without enforcing the autograd restrictions
    on inplace modification of the outputs.

    .. warning::
        This function is safe to use as long as only the input, or only the outputs
        are modified inplace after calling this function. It is the user's
        responsibility to ensure that is the case. If both the input and one or more
        of the outputs are modified inplace, gradients computed by autograd will be
        silently incorrect.
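
    A minimal illustrative sketch (not from the upstream docs); the call
    mirrors :func:`torch.split`:

    Example::

        >>> x = torch.arange(5.)
        >>> torch.unsafe_split(x, 2)
        (tensor([0., 1.]), tensor([2., 3.]), tensor([4.]))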
  19415. """
  19416. ...
  19417. def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
def unsqueeze(input: Tensor, dim: _int) -> Tensor:
    r"""
    unsqueeze(input, dim) -> Tensor

    Returns a new tensor with a dimension of size one inserted at the
    specified position.

    The returned tensor shares the same underlying data with this tensor.

    A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
    can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
    applied at :attr:`dim` = ``dim + input.dim() + 1``.

    Args:
        input (Tensor): the input tensor.
        dim (int): the index at which to insert the singleton dimension

    Example::

        >>> x = torch.tensor([1, 2, 3, 4])
        >>> torch.unsqueeze(x, 0)
        tensor([[ 1,  2,  3,  4]])
        >>> torch.unsqueeze(x, 1)
        tensor([[ 1],
                [ 2],
                [ 3],
                [ 4]])
    """
    ...
def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
    are freshly created instead of aliasing the input.
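
    A minimal illustrative sketch (not from the upstream docs): the result has
    the inserted singleton dimension but does not share storage with the input.

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> torch.unsqueeze_copy(x, 0).shape
        torch.Size([1, 3])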
  19445. """
  19446. ...
def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.values`, but all output tensors
    are freshly created instead of aliasing the input.
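
    A minimal illustrative sketch (not from the upstream docs), assuming a
    coalesced sparse COO input, since ``values`` is defined for sparse tensors:

    Example::

        >>> s = torch.tensor([[0, 2], [3, 0]]).to_sparse()
        >>> torch.values_copy(s)
        tensor([2, 3])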
  19451. """
  19452. ...
def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor:
    r"""
    vander(x, N=None, increasing=False) -> Tensor

    Generates a Vandermonde matrix.

    The columns of the output matrix are elementwise powers of the input vector
    :math:`x^{(N-1)}, x^{(N-2)}, ..., x^0`. If increasing is True, the order of the
    columns is reversed :math:`x^0, x^1, ..., x^{(N-1)}`. Such a matrix with a
    geometric progression in each row is named for Alexandre-Théophile Vandermonde.

    Arguments:
        x (Tensor): 1-D input tensor.
        N (int, optional): Number of columns in the output. If N is not specified,
            a square array is returned :math:`(N = len(x))`.
        increasing (bool, optional): Order of the powers of the columns. If True,
            the powers increase from left to right, if False (the default) they are reversed.

    Returns:
        Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{(N-1)}`,
        the second :math:`x^{(N-2)}` and so forth. If increasing is True, the columns
        are :math:`x^0, x^1, ..., x^{(N-1)}`.

    Example::

        >>> x = torch.tensor([1, 2, 3, 5])
        >>> torch.vander(x)
        tensor([[  1,   1,   1,   1],
                [  8,   4,   2,   1],
                [ 27,   9,   3,   1],
                [125,  25,   5,   1]])
        >>> torch.vander(x, N=3)
        tensor([[ 1,  1,  1],
                [ 4,  2,  1],
                [ 9,  3,  1],
                [25,  5,  1]])
        >>> torch.vander(x, N=3, increasing=True)
        tensor([[ 1,  1,  1],
                [ 1,  2,  4],
                [ 1,  3,  9],
                [ 1,  5, 25]])
    """
    ...
@overload
def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
    can be a single dimension, list of dimensions, or ``None`` to reduce over all
    dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var(a, dim=1, keepdim=True)
        tensor([[1.0631],
                [0.5590],
                [1.4893],
                [0.8258]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
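# A minimal sketch of the correction semantics described above: the divisor is
# max(0, N - correction), so correction=1 (the default) gives the sample
# variance and correction=0 the population variance.
# >>> a = torch.tensor([1.0, 2.0, 3.0, 4.0])
# >>> torch.var(a)                # divides by N - 1 = 3
# tensor(1.6667)
# >>> torch.var(a, correction=0)  # divides by N = 4
# tensor(1.2500)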
@overload
def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
    can be a single dimension, list of dimensions, or ``None`` to reduce over all
    dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var(a, dim=1, keepdim=True)
        tensor([[1.0631],
                [0.5590],
                [1.4893],
                [0.8258]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var(input: Tensor, unbiased: _bool = True) -> Tensor:
    r"""
    var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
    can be a single dimension, list of dimensions, or ``None`` to reduce over all
    dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var(a, dim=1, keepdim=True)
        tensor([[1.0631],
                [0.5590],
                [1.4893],
                [0.8258]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
    r"""
    var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
    can be a single dimension, list of dimensions, or ``None`` to reduce over all
    dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var(a, dim=1, keepdim=True)
        tensor([[1.0631],
                [0.5590],
                [1.4893],
                [0.8258]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor

    Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
    can be a single dimension, list of dimensions, or ``None`` to reduce over all
    dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var(a, dim=1, keepdim=True)
        tensor([[1.0631],
                [0.5590],
                [1.4893],
                [0.8258]])

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the variance and mean over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (var, mean) containing the variance and mean.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var_mean(a, dim=0, keepdim=True)
        (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
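# A minimal sketch: var_mean returns both statistics in one call, matching
# torch.var and torch.mean computed separately.
# >>> a = torch.tensor([1.0, 2.0, 3.0, 4.0])
# >>> v, m = torch.var_mean(a)
# >>> v, m
# (tensor(1.6667), tensor(2.5000))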
@overload
def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the variance and mean over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (var, mean) containing the variance and mean.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var_mean(a, dim=0, keepdim=True)
        (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]:
    r"""
    var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the variance and mean over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (var, mean) containing the variance and mean.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var_mean(a, dim=0, keepdim=True)
        (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the variance and mean over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (var, mean) containing the variance and mean.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var_mean(a, dim=0, keepdim=True)
        (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
@overload
def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

    Calculates the variance and mean over the dimensions specified by :attr:`dim`.
    :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
    reduce over all dimensions.

    The variance (:math:`\sigma^2`) is calculated as

    .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

    where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
    sample mean, :math:`N` is the number of samples and :math:`\delta N` is
    the :attr:`correction`.

    If :attr:`keepdim` is ``True``, the output tensor is of the same size
    as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
    Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
    output tensor having 1 (or ``len(dim)``) fewer dimension(s).

    Args:
        input (Tensor): the input tensor.
        dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
            If ``None``, all dimensions are reduced.

    Keyword args:
        correction (int): difference between the sample size and sample degrees of freedom.
            Defaults to `Bessel's correction`_, ``correction=1``.

            .. versionchanged:: 2.0
                Previously this argument was called ``unbiased`` and was a boolean
                with ``True`` corresponding to ``correction=1`` and ``False`` being
                ``correction=0``.
        keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
        out (Tensor, optional): the output tensor.

    Returns:
        A tuple (var, mean) containing the variance and mean.

    Example:
        >>> a = torch.tensor(
        ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
        ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
        ...      [-1.5745,  1.3330, -0.5596, -0.6548],
        ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
        >>> torch.var_mean(a, dim=0, keepdim=True)
        (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
         tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

    .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
    """
    ...
def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    vdot(input, other, *, out=None) -> Tensor

    Computes the dot product of two 1D tensors.

    In symbols, this function computes

    .. math::
        \sum_{i=1}^n \overline{x_i}y_i.

    where :math:`\overline{x_i}` denotes the conjugate for complex
    vectors, and it is the identity for real vectors.

    .. note::
        Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
        of two 1D tensors with the same number of elements.

    .. seealso::
        :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.

    Args:
        input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
        other (Tensor): second tensor in the dot product, must be 1D.

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::
        >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
        tensor(7)
        >>> a = torch.tensor((1 + 2j, 3 - 1j))
        >>> b = torch.tensor((2 + 1j, 4 - 0j))
        >>> torch.vdot(a, b)
        tensor([16.+1.j])
        >>> torch.vdot(b, a)
        tensor([16.-1.j])
    """
    ...
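# A minimal sketch of the conjugation rule above: vdot conjugates its first
# argument, so it agrees with torch.dot applied to the conjugated vector.
# >>> a = torch.tensor([1 + 2j, 3 - 1j])
# >>> b = torch.tensor([2 + 1j, 4 + 0j])
# >>> torch.vdot(a, b) == torch.dot(a.conj(), b)
# tensor(True)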
def view_as_complex(input: Tensor) -> Tensor:
    r"""
    view_as_complex(input) -> Tensor

    Returns a view of :attr:`input` as a complex tensor. For an input tensor of
    size :math:`m1, m2, \dots, mi, 2`, this function returns a new complex tensor
    of size :math:`m1, m2, \dots, mi`, where the last dimension of the input
    tensor is expected to represent the real and imaginary components of complex
    numbers.

    .. warning::
        :func:`view_as_complex` is only supported for tensors with
        :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
        expected to have the last dimension of size 2. In addition, the
        tensor must have a `stride` of 1 for its last dimension. The strides of all
        other dimensions must be even numbers.

    Args:
        input (Tensor): the input tensor.

    Example::
        >>> x = torch.randn(4, 2)
        >>> x
        tensor([[ 1.6116, -0.5772],
                [-1.4606, -0.9120],
                [ 0.0786, -1.7497],
                [-0.6561, -1.6623]])
        >>> torch.view_as_complex(x)
        tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
    """
    ...
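# A minimal sketch: view_as_complex and view_as_real are inverse views, so the
# round trip reproduces the input without copying storage.
# >>> x = torch.randn(4, 2)
# >>> z = torch.view_as_complex(x)
# >>> torch.view_as_real(z).equal(x)
# True
# >>> torch.view_as_real(z).data_ptr() == x.data_ptr()  # shared storage
# True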
def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
def view_as_real(input: Tensor) -> Tensor:
    r"""
    view_as_real(input) -> Tensor

    Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
    size :math:`m1, m2, \dots, mi`, this function returns a new
    real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
    represents the real and imaginary components of complex numbers.

    .. warning::
        :func:`view_as_real` is only supported for tensors with ``complex dtypes``.

    Args:
        input (Tensor): the input tensor.

    Example::
        >>> x = torch.randn(4, dtype=torch.cfloat)
        >>> x
        tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
        >>> torch.view_as_real(x)
        tensor([[ 0.4737, -0.3839],
                [-0.2098, -0.6699],
                [ 0.3470, -0.9451],
                [-0.5174, -1.3136]])
    """
    ...
def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.view_as_real`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
@overload
def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.view`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
@overload
def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    Performs the same operation as :func:`torch.view`, but all output tensors
    are freshly created instead of aliasing the input.
    """
    ...
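# A minimal sketch, assuming a torch build that exposes the *_copy ops:
# view_copy materializes the reshaped result, so writes to it never reach
# the source tensor.
# >>> x = torch.arange(6)
# >>> y = torch.view_copy(x, (2, 3))
# >>> y[0, 0] = -1
# >>> x[0]  # unchanged
# tensor(0)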
@overload
def vsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
    r"""
    vsplit(input, indices_or_sections) -> List of Tensors

    Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
    vertically according to :attr:`indices_or_sections`. Each split is a view of
    :attr:`input`.

    This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=0)``
    (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
    it must evenly divide the split dimension or a runtime error will be thrown.

    This function is based on NumPy's :func:`numpy.vsplit`.

    Args:
        input (Tensor): tensor to split.
        indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.

    Example::
        >>> t = torch.arange(16.0).reshape(4, 4)
        >>> t
        tensor([[ 0.,  1.,  2.,  3.],
                [ 4.,  5.,  6.,  7.],
                [ 8.,  9., 10., 11.],
                [12., 13., 14., 15.]])
        >>> torch.vsplit(t, 2)
        (tensor([[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]),
         tensor([[ 8.,  9., 10., 11.],
                 [12., 13., 14., 15.]]))
        >>> torch.vsplit(t, [3, 6])
        (tensor([[ 0.,  1.,  2.,  3.],
                 [ 4.,  5.,  6.,  7.],
                 [ 8.,  9., 10., 11.]]),
         tensor([[12., 13., 14., 15.]]),
         tensor([], size=(0, 4)))
    """
    ...
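# A minimal sketch of the integer-sections restriction noted above: the
# section count must divide dim 0 exactly, unlike torch.tensor_split.
# >>> t = torch.arange(12.0).reshape(4, 3)
# >>> len(torch.vsplit(t, 2))   # 4 rows split evenly into 2 pieces
# 2
# >>> torch.vsplit(t, 3)        # raises RuntimeError: 4 is not divisible by 3
# >>> len(torch.tensor_split(t, 3, dim=0))  # tensor_split allows uneven splits
# 3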
@overload
def vsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
    r"""
    vsplit(input, indices_or_sections) -> List of Tensors

    Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
    vertically according to :attr:`indices_or_sections`. Each split is a view of
    :attr:`input`.

    This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=0)``
    (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
    it must evenly divide the split dimension or a runtime error will be thrown.

    This function is based on NumPy's :func:`numpy.vsplit`.

    Args:
        input (Tensor): tensor to split.
        indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.

    Example::
        >>> t = torch.arange(16.0).reshape(4, 4)
        >>> t
        tensor([[ 0.,  1.,  2.,  3.],
                [ 4.,  5.,  6.,  7.],
                [ 8.,  9., 10., 11.],
                [12., 13., 14., 15.]])
        >>> torch.vsplit(t, 2)
        (tensor([[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]),
         tensor([[ 8.,  9., 10., 11.],
                 [12., 13., 14., 15.]]))
        >>> torch.vsplit(t, [3, 6])
        (tensor([[ 0.,  1.,  2.,  3.],
                 [ 4.,  5.,  6.,  7.],
                 [ 8.,  9., 10., 11.]]),
         tensor([[12., 13., 14., 15.]]),
         tensor([], size=(0, 4)))
    """
    ...
def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    vstack(tensors, *, out=None) -> Tensor

    Stack tensors in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after all 1-D tensors
    have been reshaped by :func:`torch.atleast_2d`.

    Args:
        tensors (sequence of Tensors): sequence of tensors to concatenate

    Keyword args:
        out (Tensor, optional): the output tensor.

    Example::
        >>> a = torch.tensor([1, 2, 3])
        >>> b = torch.tensor([4, 5, 6])
        >>> torch.vstack((a, b))
        tensor([[1, 2, 3],
                [4, 5, 6]])
        >>> a = torch.tensor([[1], [2], [3]])
        >>> b = torch.tensor([[4], [5], [6]])
        >>> torch.vstack((a, b))
        tensor([[1],
                [2],
                [3],
                [4],
                [5],
                [6]])
    """
    ...
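# A minimal sketch of the equivalence stated above: vstack is torch.cat along
# dim 0 after promoting 1-D inputs with torch.atleast_2d.
# >>> a, b = torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6])
# >>> lhs = torch.vstack((a, b))
# >>> rhs = torch.cat((torch.atleast_2d(a), torch.atleast_2d(b)), dim=0)
# >>> lhs.equal(rhs)
# True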
@overload
def where(condition: Tensor) -> Tuple[Tensor, ...]:
    r"""
    where(condition, input, other, *, out=None) -> Tensor

    Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

    The operation is defined as:

    .. math::
        \text{out}_i = \begin{cases}
            \text{input}_i & \text{if } \text{condition}_i \\
            \text{other}_i & \text{otherwise} \\
        \end{cases}

    .. note::
        The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

    Arguments:
        condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
        input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
            where :attr:`condition` is ``True``
        other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
            where :attr:`condition` is ``False``

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

    Example::
        >>> x = torch.randn(3, 2)
        >>> y = torch.ones(3, 2)
        >>> x
        tensor([[-0.4620,  0.3139],
                [ 0.3898, -0.7197],
                [ 0.0478, -0.1657]])
        >>> torch.where(x > 0, 1.0, 0.0)
        tensor([[0., 1.],
                [1., 0.],
                [1., 0.]])
        >>> torch.where(x > 0, x, y)
        tensor([[ 1.0000,  0.3139],
                [ 0.3898,  1.0000],
                [ 0.0478,  1.0000]])
        >>> x = torch.randn(2, 2, dtype=torch.double)
        >>> x
        tensor([[ 1.0779,  0.0383],
                [-0.8785, -1.1089]], dtype=torch.float64)
        >>> torch.where(x > 0, x, 0.)
        tensor([[1.0779, 0.0383],
                [0.0000, 0.0000]], dtype=torch.float64)

    .. function:: where(condition) -> tuple of LongTensor
        :noindex:

    ``torch.where(condition)`` is identical to
    ``torch.nonzero(condition, as_tuple=True)``.

    .. note::
        See also :func:`torch.nonzero`.
    """
    ...
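# A minimal sketch of the single-argument form documented above: it returns
# one index tensor per dimension, exactly like nonzero(..., as_tuple=True).
# >>> cond = torch.tensor([[True, False], [False, True]])
# >>> torch.where(cond)
# (tensor([0, 1]), tensor([0, 1]))
# >>> torch.nonzero(cond, as_tuple=True)
# (tensor([0, 1]), tensor([0, 1]))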
@overload
def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    where(condition, input, other, *, out=None) -> Tensor

    Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

    The operation is defined as:

    .. math::
        \text{out}_i = \begin{cases}
            \text{input}_i & \text{if } \text{condition}_i \\
            \text{other}_i & \text{otherwise} \\
        \end{cases}

    .. note::
        The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

    Arguments:
        condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
        input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
            where :attr:`condition` is ``True``
        other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
            where :attr:`condition` is ``False``

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

    Example::
        >>> x = torch.randn(3, 2)
        >>> y = torch.ones(3, 2)
        >>> x
        tensor([[-0.4620,  0.3139],
                [ 0.3898, -0.7197],
                [ 0.0478, -0.1657]])
        >>> torch.where(x > 0, 1.0, 0.0)
        tensor([[0., 1.],
                [1., 0.],
                [1., 0.]])
        >>> torch.where(x > 0, x, y)
        tensor([[ 1.0000,  0.3139],
                [ 0.3898,  1.0000],
                [ 0.0478,  1.0000]])
        >>> x = torch.randn(2, 2, dtype=torch.double)
        >>> x
        tensor([[ 1.0779,  0.0383],
                [-0.8785, -1.1089]], dtype=torch.float64)
        >>> torch.where(x > 0, x, 0.)
        tensor([[1.0779, 0.0383],
                [0.0000, 0.0000]], dtype=torch.float64)

    .. function:: where(condition) -> tuple of LongTensor
        :noindex:

    ``torch.where(condition)`` is identical to
    ``torch.nonzero(condition, as_tuple=True)``.

    .. note::
        See also :func:`torch.nonzero`.
    """
    ...
@overload
def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor:
    r"""
    where(condition, input, other, *, out=None) -> Tensor

    Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

    The operation is defined as:

    .. math::
        \text{out}_i = \begin{cases}
            \text{input}_i & \text{if } \text{condition}_i \\
            \text{other}_i & \text{otherwise} \\
        \end{cases}

    .. note::
        The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

    Arguments:
        condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
        input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
            where :attr:`condition` is ``True``
        other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
            where :attr:`condition` is ``False``

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

    Example::
        >>> x = torch.randn(3, 2)
        >>> y = torch.ones(3, 2)
        >>> x
        tensor([[-0.4620,  0.3139],
                [ 0.3898, -0.7197],
                [ 0.0478, -0.1657]])
        >>> torch.where(x > 0, 1.0, 0.0)
        tensor([[0., 1.],
                [1., 0.],
                [1., 0.]])
        >>> torch.where(x > 0, x, y)
        tensor([[ 1.0000,  0.3139],
                [ 0.3898,  1.0000],
                [ 0.0478,  1.0000]])
        >>> x = torch.randn(2, 2, dtype=torch.double)
        >>> x
        tensor([[ 1.0779,  0.0383],
                [-0.8785, -1.1089]], dtype=torch.float64)
        >>> torch.where(x > 0, x, 0.)
        tensor([[1.0779, 0.0383],
                [0.0000, 0.0000]], dtype=torch.float64)

    .. function:: where(condition) -> tuple of LongTensor
        :noindex:

    ``torch.where(condition)`` is identical to
    ``torch.nonzero(condition, as_tuple=True)``.

    .. note::
        See also :func:`torch.nonzero`.
    """
    ...
@overload
def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor:
    r"""
    where(condition, input, other, *, out=None) -> Tensor

    Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

    The operation is defined as:

    .. math::
        \text{out}_i = \begin{cases}
            \text{input}_i & \text{if } \text{condition}_i \\
            \text{other}_i & \text{otherwise} \\
        \end{cases}

    .. note::
        The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

    Arguments:
        condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
        input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
            where :attr:`condition` is ``True``
        other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
            where :attr:`condition` is ``False``

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

    Example::
        >>> x = torch.randn(3, 2)
        >>> y = torch.ones(3, 2)
        >>> x
        tensor([[-0.4620,  0.3139],
                [ 0.3898, -0.7197],
                [ 0.0478, -0.1657]])
        >>> torch.where(x > 0, 1.0, 0.0)
        tensor([[0., 1.],
                [1., 0.],
                [1., 0.]])
        >>> torch.where(x > 0, x, y)
        tensor([[ 1.0000,  0.3139],
                [ 0.3898,  1.0000],
                [ 0.0478,  1.0000]])
        >>> x = torch.randn(2, 2, dtype=torch.double)
        >>> x
        tensor([[ 1.0779,  0.0383],
                [-0.8785, -1.1089]], dtype=torch.float64)
        >>> torch.where(x > 0, x, 0.)
        tensor([[1.0779, 0.0383],
                [0.0000, 0.0000]], dtype=torch.float64)

    .. function:: where(condition) -> tuple of LongTensor
        :noindex:

    ``torch.where(condition)`` is identical to
    ``torch.nonzero(condition, as_tuple=True)``.

    .. note::
        See also :func:`torch.nonzero`.
    """
    ...
@overload
def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor:
    r"""
    where(condition, input, other, *, out=None) -> Tensor

    Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

    The operation is defined as:

    .. math::
        \text{out}_i = \begin{cases}
            \text{input}_i & \text{if } \text{condition}_i \\
            \text{other}_i & \text{otherwise} \\
        \end{cases}

    .. note::
        The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

    Arguments:
        condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
        input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
            where :attr:`condition` is ``True``
        other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
            where :attr:`condition` is ``False``

    Keyword args:
        out (Tensor, optional): the output tensor.

    Returns:
        Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

    Example::
        >>> x = torch.randn(3, 2)
        >>> y = torch.ones(3, 2)
        >>> x
        tensor([[-0.4620,  0.3139],
                [ 0.3898, -0.7197],
                [ 0.0478, -0.1657]])
        >>> torch.where(x > 0, 1.0, 0.0)
        tensor([[0., 1.],
                [1., 0.],
                [1., 0.]])
        >>> torch.where(x > 0, x, y)
        tensor([[ 1.0000,  0.3139],
                [ 0.3898,  1.0000],
                [ 0.0478,  1.0000]])
        >>> x = torch.randn(2, 2, dtype=torch.double)
        >>> x
        tensor([[ 1.0779,  0.0383],
                [-0.8785, -1.1089]], dtype=torch.float64)
        >>> torch.where(x > 0, x, 0.)
        tensor([[1.0779, 0.0383],
                [0.0000, 0.0000]], dtype=torch.float64)

    .. function:: where(condition) -> tuple of LongTensor
        :noindex:

    ``torch.where(condition)`` is identical to
    ``torch.nonzero(condition, as_tuple=True)``.

    .. note::
        See also :func:`torch.nonzero`.
    """
    ...
@overload
def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    xlogy(input, other, *, out=None) -> Tensor

    Alias for :func:`torch.special.xlogy`.
    """
    ...
@overload
def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    xlogy(input, other, *, out=None) -> Tensor

    Alias for :func:`torch.special.xlogy`.
    """
    ...
@overload
def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
    r"""
    xlogy(input, other, *, out=None) -> Tensor

    Alias for :func:`torch.special.xlogy`.
    """
    ...
@overload
def xlogy_(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
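# A minimal sketch of the xlogy convention: the result is 0 wherever the first
# argument is 0, even though log(0) itself diverges.
# >>> x = torch.tensor([0.0, 1.0, 2.0])
# >>> y = torch.tensor([0.0, 2.0, 4.0])
# >>> torch.xlogy(x, y)  # 0 * log(0) is defined as 0 here
# tensor([0.0000, 0.6931, 2.7726])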
def zero_(input: Tensor) -> Tensor: ...
@overload
def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `0`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::
        >>> torch.zeros(2, 3)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
        >>> torch.zeros(5)
        tensor([ 0.,  0.,  0.,  0.,  0.])
    """
    ...
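# A minimal sketch: the overloads above accept either a size sequence or
# variadic ints; both spellings produce the same tensor.
# >>> torch.zeros((2, 3)).equal(torch.zeros(2, 3))
# True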
@overload
def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `0`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::
        >>> torch.zeros(2, 3)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
        >>> torch.zeros(5)
        tensor([ 0.,  0.,  0.,  0.,  0.])
    """
    ...
@overload
def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `0`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::
        >>> torch.zeros(2, 3)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
        >>> torch.zeros(5)
        tensor([ 0.,  0.,  0.,  0.,  0.])
    """
    ...
@overload
def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

    Returns a tensor filled with the scalar value `0`, with the shape defined
    by the variable argument :attr:`size`.

    Args:
        size (int...): a sequence of integers defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple.

    Keyword args:
        out (Tensor, optional): the output tensor.
        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
        layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
            Default: ``torch.strided``.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, uses the current device for the default tensor type
            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
            for CPU tensor types and the current CUDA device for CUDA tensor types.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.

    Example::
        >>> torch.zeros(2, 3)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
        >>> torch.zeros(5)
        tensor([ 0.,  0.,  0.,  0.,  0.])
    """
    ...
def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
    r"""
    zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

    Returns a tensor filled with the scalar value `0`, with the same size as
    :attr:`input`. ``torch.zeros_like(input)`` is equivalent to
    ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

    .. warning::
        As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
        the old ``torch.zeros_like(input, out=output)`` is equivalent to
        ``torch.zeros(input.size(), out=output)``.

    Args:
        input (Tensor): the size of :attr:`input` will determine size of the output tensor.

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
            Default: if ``None``, defaults to the dtype of :attr:`input`.
        layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
            Default: if ``None``, defaults to the layout of :attr:`input`.
        device (:class:`torch.device`, optional): the desired device of returned tensor.
            Default: if ``None``, defaults to the device of :attr:`input`.
        requires_grad (bool, optional): If autograd should record operations on the
            returned tensor. Default: ``False``.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            returned Tensor. Default: ``torch.preserve_format``.

    Example::
        >>> input = torch.empty(2, 3)
        >>> torch.zeros_like(input)
        tensor([[ 0.,  0.,  0.],
                [ 0.,  0.,  0.]])
    """
    ...
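# A minimal sketch of the keyword overrides: unspecified properties are taken
# from `input`, while explicit keywords replace them.
# >>> src = torch.empty(2, 3, dtype=torch.float64)
# >>> out = torch.zeros_like(src, dtype=torch.int32)
# >>> out.shape, out.dtype
# (torch.Size([2, 3]), torch.int32)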
  20555. __all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
  20556. '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation',
  20557. '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async',
  20558. '_assert_scalar', '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char',
  20559. '_cast_Double', '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short',
  20560. '_choose_qparams_per_tensor', '_chunk_cat', '_coalesce', '_compute_linear_combination', '_conj',
  20561. '_conj_copy', '_conj_physical', '_convert_indices_from_coo_to_csr',
  20562. '_convert_indices_from_csr_to_coo', '_convert_weight_to_int4pack', '_convolution',
  20563. '_convolution_mode', '_copy_from', '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm',
  20564. '_cslt_sparse_mm_search', '_ctc_loss', '_cudnn_ctc_loss', '_cudnn_init_dropout_state',
  20565. '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache',
  20566. '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size',
  20567. '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange',
  20568. '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag',
  20569. '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
  20570. '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
  20571. '_fake_quantize_learnable_per_tensor_affine',
  20572. '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
  20573. '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c',
'_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos',
'_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_',
'_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan',
'_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_',
'_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_',
'_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf',
'_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_',
'_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac',
'_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_',
'_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_',
'_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_max', '_foreach_maximum',
'_foreach_maximum_', '_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_',
'_foreach_neg', '_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_',
'_foreach_reciprocal', '_foreach_reciprocal_', '_foreach_round', '_foreach_round_',
'_foreach_sigmoid', '_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin',
'_foreach_sin_', '_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_',
'_foreach_sub', '_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh',
'_foreach_tanh_', '_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor',
'_functional_assert_async', '_functional_assert_scalar', '_functional_sym_constrain_range',
'_functional_sym_constrain_range_for_size', '_functionalize_apply_view_metas',
'_functionalize_are_all_mutations_hidden_from_autograd',
'_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update',
'_functionalize_has_metadata_mutation', '_functionalize_is_symbolic',
'_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace',
'_functionalize_sync', '_functionalize_was_inductor_storage_resized',
'_functionalize_was_storage_changed', '_fused_adagrad_', '_fused_adam_', '_fused_adamw_',
'_fused_dropout', '_fused_moving_avg_obs_fq_helper',
'_fused_sdp_choice', '_fused_sgd_', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback',
'_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts',
'_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true',
'_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_lazy_clone', '_linalg_check_errors',
'_linalg_det', '_linalg_eigh', '_linalg_slogdet', '_linalg_solve_ex', '_linalg_svd',
'_log_softmax', '_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info',
'_make_dep_token', '_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor',
'_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear',
'_mkldnn_reshape', '_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution',
'_mps_convolution_transpose', '_native_batch_norm_legit', '_native_batch_norm_legit_no_training',
'_native_multi_head_attention', '_neg_view', '_neg_view_copy',
'_nested_compute_contiguous_strides_offsets', '_nested_from_padded',
'_nested_from_padded_and_nested_example', '_nested_get_jagged_dummy', '_nested_get_lengths',
'_nested_get_offsets', '_nested_get_ragged_idx', '_nested_get_values', '_nested_get_values_copy',
'_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned',
'_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer',
'_nested_view_from_buffer_copy', '_nested_view_from_jagged', '_nested_view_from_jagged_copy',
'_nnpack_available', '_nnpack_spatial_convolution', '_pack_padded_sequence',
'_pad_packed_sequence', '_pin_memory', '_prelu_kernel', '_print', '_propagate_xla_data',
'_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_',
'_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16',
'_scaled_dot_product_attention_math', '_scaled_dot_product_cudnn_attention',
'_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention',
'_scaled_dot_product_flash_attention_for_cpu', '_scaled_mm', '_shape_as_tensor',
'_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_',
'_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to',
'_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum',
'_sparse_log_softmax_backward_data', '_sparse_semi_structured_addmm',
'_sparse_semi_structured_apply', '_sparse_semi_structured_apply_dense',
'_sparse_semi_structured_linear', '_sparse_semi_structured_mm', '_sparse_semi_structured_tile',
'_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack',
'_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch',
'_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy',
'_test_check_tensor', '_test_functorch_fallback', '_test_parallel_materialize',
'_test_serialization_subcmul', '_to_cpu', '_to_functional_tensor', '_to_sparse_semi_structured',
'_transform_bias_rescale_qkv', '_transformer_encoder_layer_fwd', '_trilinear',
'_triton_multi_head_attention', '_triton_scaled_dot_attention', '_unique', '_unique2',
'_unpack_dual', '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss',
'_use_cudnn_rnn_flatten_weight', '_validate_compressed_sparse_indices',
'_validate_sparse_bsc_tensor_args', '_validate_sparse_bsr_tensor_args',
'_validate_sparse_compressed_tensor_args', '_validate_sparse_coo_tensor_args',
'_validate_sparse_csc_tensor_args', '_validate_sparse_csr_tensor_args', '_values_copy',
'_weight_int4pack_mm', '_weight_int8pack_mm', '_weight_norm', '_weight_norm_interface', 'abs',
'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d',
'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr',
'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout',
'alpha_dropout_', 'amax', 'amin', 'aminmax', 'angle', 'any', 'arange', 'arccos',
'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2',
'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided',
'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_',
'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm',
'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce',
'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts',
'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear',
'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift',
'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm',
'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu',
'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve',
'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min',
'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations',
'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd',
'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d',
'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_',
'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy',
'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution',
'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose',
'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummin', 'cumprod',
'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_',
'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter',
'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit',
'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like',
'empty_permuted', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc',
'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye',
'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_',
'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax',
'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy',
'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_',
'ge', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads',
'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d',
'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside',
'hinge_embedding_loss', 'histc', 'histogram', 'histogramdd', 'hsmm',
'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add',
'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select',
'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex',
'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference',
'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed',
'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'layer_norm', 'lcm',
'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log',
'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp',
'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack',
'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
'matrix_exp', 'matrix_power', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
'max_pool3d', 'maximum', 'mean', 'median', 'min', 'minimum', 'miopen_batch_norm',
'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu',
'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn',
'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights',
'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'moveaxis',
'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num',
'nan_to_num_', 'nanmean', 'nanmedian', 'nanquantile', 'nansum', 'narrow',
'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout',
'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative',
'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal',
'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance',
'pdist', 'permute', 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson',
'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types',
'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale',
'q_zero_point', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor',
'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell',
'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu',
'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_',
'resolve_conj', 'resolve_neg', 'result_type', 'rms_norm', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh',
'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu',
'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add',
'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter',
'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn',
'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_',
'slice_copy', 'slice_inverse', 'slice_scatter', 'slogdet', 'smm', 'softmax', 'sort',
'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor',
'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes',
'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy',
'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'swapaxes',
'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take',
'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold',
'threshold_', 'tile', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz',
'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu',
'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten',
'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes',
'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot',
'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy',
'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like']
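
# A minimal usage sketch, assuming a standard PyTorch install: `torch/__init__.py`
# re-exports the names in `__all__` from `torch._C._VariableFunctions` onto the
# top-level `torch` module, so type checkers resolve calls such as `torch.zeros`
# or `torch.aminmax` against the signatures declared in this stub.
#
#     >>> import torch
#     >>> torch.zeros(2, 3)
#     tensor([[0., 0., 0.],
#             [0., 0., 0.]])
#     >>> mn, mx = torch.aminmax(torch.tensor([1., 3.]))  # named-tuple return
#     >>> (mn, mx)
#     (tensor(1.), tensor(3.))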