# @generated from torch/_C/__init__.pyi.in
# mypy: disable-error-code="type-arg"
# mypy: allow-untyped-defs
import builtins
from enum import Enum, IntEnum
from pathlib import Path
from typing import (
    Any,
    AnyStr,
    BinaryIO,
    Callable,
    ContextManager,
    Dict,
    Generic,
    Iterable,
    Iterator,
    List,
    Literal,
    NamedTuple,
    Optional,
    Protocol,
    Sequence,
    Set,
    SupportsIndex,
    Tuple,
    Type,
    TypeVar,
    Union,
    overload,
    runtime_checkable,
)
from typing_extensions import ParamSpec, Self

import numpy

import torch
from torch import inf, SymInt, Tensor
from torch.autograd.graph import Node as _Node
from torch.package import PackageExporter
from torch.storage import UntypedStorage, TypedStorage
from torch.types import (
    _bool,
    _complex,
    _device,
    _dispatchkey,
    _dtype,
    _float,
    _int,
    _layout,
    _qscheme,
    _size,
    Device,
    Number,
    Storage,
)
from torch._prims_common import DeviceLikeType
from torch.utils._python_dispatch import TorchDispatchMode

# This module is defined in torch/csrc/Module.cpp
from . import _functorch, _lazy, _lazy_ts_backend, _nn, _onnx, _VariableFunctions, _cpu, _aoti, _verbose

K = TypeVar("K")
T = TypeVar("T")
S = TypeVar("S", bound="torch.Tensor")
P = ParamSpec("P")
ReturnVal = TypeVar("ReturnVal", covariant=True)  # return value (always covariant)
_T_co = TypeVar("_T_co", covariant=True)

@runtime_checkable
class _NestedSequence(Protocol[_T_co]):
    """A protocol for representing nested sequences.

    References::
        `numpy._typing._NestedSequence`
        <https://github.com/numpy/numpy/blob/main/numpy/_typing/_nested_sequence.py>
    """

    def __len__(self, /) -> builtins.int: ...
    def __getitem__(self, index: builtins.int, /) -> _T_co | _NestedSequence[_T_co]: ...
    def __contains__(self, x: builtins.object, /) -> builtins.bool: ...
    def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ...
    def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ...
    def count(self, value: Any, /) -> builtins.int: ...
    def index(self, value: Any, /) -> builtins.int: ...
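
# Usage sketch (illustrative comment, not part of the generated stub): because the
# protocol is @runtime_checkable and defines only methods, isinstance() merely checks
# that those methods exist, so ordinary (nested) lists satisfy it.
#
#   assert isinstance([1, 2, 3], _NestedSequence)
#   assert isinstance([[1, 2], [3, 4]], _NestedSequence)
#   assert not isinstance(42, _NestedSequence)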
# Defined in torch/csrc/Device.cpp
class device:
    type: str  # THPDevice_type
    index: _int  # THPDevice_index

    def __get__(self, instance, owner=None) -> device: ...

    # THPDevice_pynew
    @overload
    def __init__(self, device: DeviceLikeType) -> None: ...
    @overload
    def __init__(self, type: str, index: _int) -> None: ...

    # Uncomment if we ever make torch.device a decorator
    # def __call__(self, func: T) -> T: ...

    def __enter__(self) -> device: ...
    def __exit__(self, exc_type, exc_val, exc_tb) -> None: ...
    def __reduce__(self) -> Tuple[Any, ...]: ...  # THPDevice_reduce
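
# Usage sketch (illustrative comment): the two __init__ overloads above accept either a
# device-like value or an explicit (type, index) pair, and __enter__/__exit__ let a
# device act as a context manager that sets the default device for tensor creation.
#
#   cpu = torch.device("cpu")             # from a device string
#   gpu = torch.device("cuda", 0)         # gpu.type == "cuda", gpu.index == 0
#   with torch.device("cuda:0"):          # new tensors default to cuda:0 in this block
#       t = torch.empty(3)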
# Defined in torch/csrc/Stream.cpp
class Stream:
    stream_id: _int  # Stream id
    device_index: _int
    device_type: _int

    device: _device  # The device of the stream

    @overload
    def __new__(self, device: Optional[DeviceLikeType] = None, *, priority: _int = 0) -> Stream: ...
    @overload
    def __new__(self, stream_id: _int, device_index: _int, device_type: _int, *, priority: _int = 0) -> Stream: ...
    def query(self) -> _bool: ...
    def synchronize(self) -> None: ...
    def wait_event(self, event: Event) -> None: ...
    def wait_stream(self, other: Stream) -> None: ...
    def record_event(self, event: Optional[Event] = None) -> Event: ...
    def __hash__(self) -> _int: ...
    def __repr__(self) -> str: ...
    def __eq__(self, other: object) -> _bool: ...
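
# Usage sketch (illustrative comment, assuming a recent PyTorch build with CUDA): the
# first __new__ overload takes an optional device, and the methods above order and
# synchronize work queued on the stream; torch.cuda.Stream exposes the same surface.
#
#   s = torch.Stream(device="cuda")
#   s.wait_stream(torch.cuda.current_stream())   # order s after the current stream
#   ev = s.record_event()                        # capture the work queued on s
#   s.synchronize()                              # block until that work finishes
#   assert s.query()                             # no pending work remains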
# Defined in torch/csrc/Event.cpp
class Event:
    device: _device  # The device of the Event
    event_id: _int  # The raw event created by device backend

    def __new__(self,
        device: Optional[DeviceLikeType] = None,
        *,
        enable_timing: _bool = False,
        blocking: _bool = False,
        interprocess: _bool = False) -> Event: ...
    @classmethod
    def from_ipc_handle(self, device: _device, ipc_handle: bytes) -> Event: ...
    def record(self, stream: Optional[Stream] = None) -> None: ...
    def wait(self, stream: Optional[Stream] = None) -> None: ...
    def query(self) -> _bool: ...
    def elapsed_time(self, other: Event) -> _float: ...
    def synchronize(self) -> None: ...
    def ipc_handle(self) -> bytes: ...
    def __repr__(self) -> str: ...
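
# Usage sketch (illustrative comment, assuming a CUDA-capable build): events created
# with enable_timing=True and recorded on a stream can measure elapsed GPU time;
# torch.cuda.Event exposes the same record/elapsed_time surface shown above.
#
#   start = torch.cuda.Event(enable_timing=True)
#   end = torch.cuda.Event(enable_timing=True)
#   start.record()
#   y = x @ x                              # some GPU work
#   end.record()
#   end.synchronize()                      # wait for the recorded work to finish
#   ms = start.elapsed_time(end)           # milliseconds between the two events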
# Defined in torch/csrc/Size.cpp
class Size(Tuple[_int, ...]):
    # TODO: __reduce__
    @overload  # type: ignore[override]
    def __getitem__(self: Size, key: _int) -> _int: ...
    @overload
    def __getitem__(self: Size, key: slice) -> Size: ...
    def numel(self: Size) -> _int: ...
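
# Usage sketch (illustrative comment): torch.Size is a tuple subclass, so integer
# indexing yields an int, slicing yields another Size, and numel() is the product of
# the dimensions.
#
#   shape = torch.zeros(2, 3, 4).shape     # torch.Size([2, 3, 4])
#   shape[0]                               # 2
#   shape[1:]                              # torch.Size([3, 4])
#   shape.numel()                          # 24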
# Defined in torch/csrc/Dtype.cpp
class dtype:
    # TODO: __reduce__
    is_floating_point: _bool
    is_complex: _bool
    is_signed: _bool
    itemsize: _int
    def to_real(self) -> dtype: ...
    def to_complex(self) -> dtype: ...
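
# Usage sketch (illustrative comment): dtype objects expose the attributes above
# directly; to_complex()/to_real() map between real and complex dtypes of matching
# width in recent PyTorch releases.
#
#   torch.float32.is_floating_point        # True
#   torch.complex64.is_complex             # True
#   torch.float32.itemsize                 # 4 (bytes per element)
#   torch.float32.to_complex()             # torch.complex64
#   torch.complex128.to_real()             # torch.float64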
# Defined in torch/csrc/TypeInfo.cpp
class iinfo:
    bits: _int
    min: _int
    max: _int
    dtype: str

    def __init__(self, dtype: _dtype) -> None: ...

class finfo:
    bits: _int
    min: _float
    max: _float
    eps: _float
    tiny: _float
    smallest_normal: _float
    resolution: _float
    dtype: str

    @overload
    def __init__(self, dtype: _dtype) -> None: ...
    @overload
    def __init__(self) -> None: ...
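
# Usage sketch (illustrative comment): iinfo/finfo report the numeric limits of integer
# and floating-point dtypes; the no-argument finfo() overload uses the default dtype.
#
#   torch.iinfo(torch.int8).max            # 127
#   torch.iinfo(torch.uint8).min           # 0
#   torch.finfo(torch.float32).eps         # ~1.19e-07
#   torch.finfo(torch.float16).max         # 65504.0
#   torch.finfo().dtype                    # usually 'float32'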
float32: dtype = ...
float: dtype = ...
float64: dtype = ...
double: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
float8_e4m3fn: dtype = ...
float8_e4m3fnuz: dtype = ...
float8_e5m2: dtype = ...
float8_e5m2fnuz: dtype = ...
half: dtype = ...
uint8: dtype = ...
uint16: dtype = ...
uint32: dtype = ...
uint64: dtype = ...
int8: dtype = ...
int16: dtype = ...
short: dtype = ...
int32: dtype = ...
int: dtype = ...
int64: dtype = ...
long: dtype = ...
complex32: dtype = ...
complex64: dtype = ...
chalf: dtype = ...
cfloat: dtype = ...
complex128: dtype = ...
cdouble: dtype = ...
quint8: dtype = ...
qint8: dtype = ...
qint32: dtype = ...
bool: dtype = ...
quint4x2: dtype = ...
quint2x4: dtype = ...
bits1x8: dtype = ...
bits2x4: dtype = ...
bits4x2: dtype = ...
bits8: dtype = ...
bits16: dtype = ...
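
# Usage sketch (illustrative comment): several of the names above are aliases for the
# same dtype object, and any of them can be passed wherever a dtype is expected.
#
#   torch.float is torch.float32           # True
#   torch.half is torch.float16            # True
#   torch.long is torch.int64              # True
#   torch.tensor([1, 2], dtype=torch.bfloat16)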
# Defined in torch/csrc/Layout.cpp
class layout: ...

# Defined in torch/csrc/utils/disable_torch_function.cpp
def DisableTorchFunction(): ...
def DisableTorchFunctionSubclass(): ...

# Defined in torch/csrc/utils/tensor_layouts.cpp
strided: layout = ...
sparse_coo: layout = ...
sparse_csr: layout = ...
sparse_csc: layout = ...
sparse_bsr: layout = ...
sparse_bsc: layout = ...
_mkldnn: layout = ...
jagged: layout = ...
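
# Usage sketch (illustrative comment): a tensor reports its layout via .layout, and the
# sparse conversion helpers produce tensors with the corresponding sparse layouts.
#
#   torch.zeros(3, 3).layout                    # torch.strided
#   torch.zeros(3, 3).to_sparse().layout        # torch.sparse_coo
#   torch.zeros(3, 3).to_sparse_csr().layout    # torch.sparse_csr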
# Defined in torch/csrc/MemoryFormat.cpp
class memory_format: ...

# Defined in torch/csrc/utils/tensor_memoryformats.cpp
contiguous_format: memory_format = ...
channels_last: memory_format = ...
channels_last_3d: memory_format = ...
preserve_format: memory_format = ...
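
# Usage sketch (illustrative comment): memory formats describe how a (typically 4D NCHW)
# tensor is laid out; channels_last keeps the same logical shape but strides the channel
# dimension last.
#
#   x = torch.randn(8, 3, 32, 32)
#   y = x.contiguous(memory_format=torch.channels_last)
#   y.is_contiguous(memory_format=torch.channels_last)    # True
#   z = torch.empty_like(x, memory_format=torch.preserve_format)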
# Defined in torch/csrc/QScheme.cpp
class qscheme: ...

# Defined in torch/csrc/utils/tensor_qschemes.h
per_tensor_affine: qscheme = ...
per_channel_affine: qscheme = ...
per_tensor_symmetric: qscheme = ...
per_channel_symmetric: qscheme = ...
per_channel_affine_float_qparams: qscheme = ...
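
# Usage sketch (illustrative comment): quantized tensors report which of the schemes
# above they use via Tensor.qscheme().
#
#   x = torch.randn(4)
#   q = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
#   q.qscheme()                            # torch.per_tensor_affine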
# Defined in torch/csrc/autograd/python_function.cpp
class _FunctionBase:
    saved_tensors: Tuple[Tensor]
    _raw_saved_tensors: Tuple[Any]
    next_functions: Tuple[Tuple[Any, _int], ...]
    needs_input_grad: Tuple[_bool]
    metadata: dict
    _materialize_non_diff_grads: _bool
    # skip adding type hints for the fields that have wrappers defined
    # in torch/autograd/function.py

# Defined in torch/csrc/autograd/python_legacy_variable.cpp
class _LegacyVariableBase(Tensor):  # inherits from Tensor to appease mypy
    def __init__(
        self,
        data: Optional[Tensor] = ...,
        requires_grad: Optional[_bool] = ...,
        volatile: Optional[_bool] = ...,
        _grad_fn: Optional[_FunctionBase] = ...,
    ) -> None: ...

# Defined in torch/csrc/jit/python/init.cpp
class IODescriptor: ...
class JITException: ...

class Future(Generic[T]):
    def __init__(self, devices: List[device]) -> None: ...
    def done(self) -> _bool: ...
    def value(self) -> T: ...
    def wait(self) -> T: ...
    def add_done_callback(self, callback: Callable) -> None: ...
    def then(self, callback: Callable) -> Future[T]: ...
    def set_result(self, result: T) -> None: ...
    def _set_unwrap_func(self, callback: Callable) -> None: ...
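
# Usage sketch (illustrative comment): this class backs the public torch.futures.Future,
# which exposes the same wait/then/set_result surface shown above.
#
#   fut = torch.futures.Future()
#   fut.then(lambda f: print("done:", f.wait()))
#   fut.set_result(42)
#   fut.wait()                             # 42
#   fut.done()                             # True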
class _Await:
    def __init__(self) -> None: ...
    def fn(self) -> Callable: ...
    def args(self) -> Tuple[Any, ...]: ...
    def is_nowait(self) -> _bool: ...

def _jit_set_num_profiled_runs(num: _size) -> _size: ...

# Defined in torch/csrc/jit/passes/mobile_optimizer_type.h
class _MobileOptimizerType: ...

CONV_BN_FUSION: _MobileOptimizerType
INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType
REMOVE_DROPOUT: _MobileOptimizerType
FUSE_ADD_RELU: _MobileOptimizerType
HOIST_CONV_PACKED_PARAMS: _MobileOptimizerType
VULKAN_AUTOMATIC_GPU_TRANSFER: _MobileOptimizerType

def fork(*args: Any, **kwargs: Any) -> Future: ...
def wait(fut: Future) -> Any: ...
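
# Usage sketch (illustrative comment): fork/wait are the bindings behind torch.jit.fork
# and torch.jit.wait, which run a TorchScript-compatible callable asynchronously and
# return its result through a Future.
#
#   def double(x):
#       return x * 2
#
#   fut = torch.jit.fork(double, torch.ones(3))
#   result = torch.jit.wait(fut)           # tensor([2., 2., 2.])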
def _awaitable(*args: Any, **kwargs: Any) -> _Await: ...
def _awaitable_wait(aw: _Await) -> Any: ...
def _awaitable_nowait(x: Any) -> _Await: ...
def _collect_all(futures: List[Future]) -> Future: ...
def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ...
def unify_type_list(types: List[JitType]) -> JitType: ...
def _freeze_module(
    module: ScriptModule,
    preserved_attrs: List[str] = [],
    freeze_interfaces: _bool = True,
    preserveParameters: _bool = True,
) -> ScriptModule: ...
def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ...
def _jit_pass_optimize_for_inference(
    module: torch.jit.ScriptModule,
    other_methods: List[str] = [],
) -> None: ...
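
# Usage sketch (illustrative comment): _freeze_module and the frozen-graph passes above
# are normally driven through the public torch.jit.freeze and
# torch.jit.optimize_for_inference helpers.
#
#   scripted = torch.jit.script(model).eval()
#   frozen = torch.jit.freeze(scripted)
#   optimized = torch.jit.optimize_for_inference(frozen)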
  299. def _jit_pass_fold_frozen_conv_bn(graph: Graph): ...
  300. def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ...
  301. def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ...
  302. def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ...
  303. def _jit_pass_concat_frozen_linear(graph: Graph): ...
  304. def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ...
  305. def _jit_pass_transpose_frozen_linear(graph: Graph): ...
  306. def _jit_pass_remove_dropout(module: torch.jit.ScriptModule): ...
  307. def _is_tracing() -> _bool: ...
  308. def _jit_init() -> _bool: ...
  309. def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ...
  310. def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ...
  311. def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ...
  312. def _get_operation_overload(
  313. op_name: str,
  314. op_overload_name: str,
  315. ) -> Tuple[Callable, Callable, List[Any]]: ...
  316. def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ...
  317. def _jit_pass_optimize_for_mobile(
  318. module: torch.jit.ScriptModule,
  319. optimization_blocklist: Set[_MobileOptimizerType],
  320. preserved_methods: List[AnyStr],
  321. ) -> torch.jit.ScriptModule: ...
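# Sketch of the public wrapper around the mobile optimization pass
# (torch.utils.mobile_optimizer.optimize_for_mobile; MyModule is a placeholder nn.Module):
#
#   import torch
#   from torch.utils.mobile_optimizer import optimize_for_mobile
#
#   scripted = torch.jit.script(MyModule().eval())
#   mobile_ready = optimize_for_mobile(scripted)      # accepts an optional blocklist of
#                                                     # _MobileOptimizerType values
#   mobile_ready._save_for_lite_interpreter("model.ptl")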
  322. def _clone_module_with_class(
  323. module: torch.jit.ScriptModule,
  324. ignored_methods: List[AnyStr],
  325. ignored_attributes: List[AnyStr],
  326. ) -> torch.jit.ScriptModule: ...
  327. def _jit_pass_vulkan_optimize_for_mobile(
  328. module: torch.jit.ScriptModule,
  329. optimization_blocklist: Set[_MobileOptimizerType],
  330. preserved_methods: List[AnyStr],
  331. ) -> torch.jit.ScriptModule: ...
  332. def _jit_pass_metal_optimize_for_mobile(
  333. module: torch.jit.ScriptModule,
  334. preserved_methods: List[AnyStr],
  335. ) -> torch.jit.ScriptModule: ...
  336. def _jit_pass_inline(Graph) -> None: ...
  337. def _jit_pass_constant_propagation(Graph) -> None: ...
  338. def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ...
  339. def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ...
  340. def _jit_erase_non_input_shape_information(Graph) -> None: ...
  341. def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ...
  342. def _jit_get_all_schemas() -> List[FunctionSchema]: ...
  343. def _jit_check_alias_annotation(
  344. g: Graph,
  345. args: Tuple[Any, ...],
  346. unqualified_op_name: str,
  347. ): ...
  348. def _jit_can_fuse_on_cpu() -> _bool: ...
  349. def _jit_can_fuse_on_gpu() -> _bool: ...
  350. def _jit_can_fuse_on_cpu_legacy() -> _bool: ...
  351. def _debug_get_fusion_group_inlining() -> _bool: ...
  352. def _debug_set_fusion_group_inlining(enable: _bool): ...
  353. def _jit_texpr_fuser_enabled() -> _bool: ...
  354. def _jit_nvfuser_enabled() -> _bool: ...
  355. def _jit_llga_enabled() -> _bool: ...
  356. def _jit_set_llga_enabled(enable: _bool): ...
  357. def _llvm_enabled() -> _bool: ...
  358. def _jit_override_can_fuse_on_cpu(override: _bool): ...
  359. def _jit_override_can_fuse_on_gpu(override: _bool): ...
  360. def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ...
  361. def _jit_set_symbolic_shapes_test_mode(override: _bool): ...
  362. def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ...
  363. def _jit_set_texpr_fuser_enabled(enable: _bool): ...
  364. def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ...
  365. def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ...
  366. def _jit_cat_wo_conditionals(optimize_cat: _bool): ...
  367. def _jit_opt_conditionals(opt_conds: _bool): ...
  368. def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ...
  369. def _jit_pass_erase_shape_information(graph: Graph): ...
  370. def _jit_pass_fold_convbn(module: torch.jit.ScriptModule): ...
  371. def _jit_pass_insert_observers(
  372. module: torch.jit.ScriptModule,
  373. method_name: str,
  374. qconfig_dict: Dict[str, Any],
  375. inplace: _bool,
  376. quant_type: _int,
  377. ): ...
  378. def _jit_pass_insert_quant_dequant(
  379. module: torch.jit.ScriptModule,
  380. method_name: str,
  381. inplace: _bool,
  382. debug: _bool,
  383. quant_type: _int,
  384. ): ...
  385. def _jit_pass_insert_quant_dequant_for_ondevice_ptq(
  386. module: torch.jit.ScriptModule,
  387. method_name: str,
  388. inplace: _bool,
  389. debug: _bool,
  390. quant_type: _int,
  391. ): ...
  392. def _jit_pass_quant_finalize(
  393. module: torch.jit.ScriptModule,
  394. quant_type: _int,
  395. preserved_attrs: Sequence[str],
  396. ): ...
  397. def _jit_pass_quant_finalize_for_ondevice_ptq(
  398. module: torch.jit.ScriptModule,
  399. quant_type: _int,
  400. method_name: str,
  401. ): ...
  402. def _jit_pass_insert_observer_method_for_ondevice_ptq(
  403. module: torch.jit.ScriptModule,
  404. method_name: str,
  405. qconfig_dict: Dict[str, Any],
  406. inplace: _bool,
  407. quant_type: _int,
  408. ): ...
  409. def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ...
  410. def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ...
  411. def _jit_set_fusion_strategy(
  412. strategy: List[Tuple[str, _int]],
  413. ) -> List[Tuple[str, _int]]: ...
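# Usage sketch (torch.jit.set_fusion_strategy is the public wrapper): the strategy is a list of
# ("STATIC" | "DYNAMIC", depth) pairs, and the previous strategy is returned:
#
#   import torch
#   prev = torch.jit.set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 10)])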
  414. def _jit_try_infer_type(obj: Any) -> InferredType: ...
  415. def _jit_get_trigger_value(trigger_name: str) -> _int: ...
  416. # Defined in torch/csrc/jit/python/script_init.cpp
  417. ResolutionCallback = Callable[[str], Callable[..., Any]]
  418. # Defined in torch/csrc/jit/python/script_init.cpp
  419. # and torch/csrc/jit/python/init.cpp
  420. def _maybe_call_torch_function_for_op_packet(
  421. op_overload_packet: Any,
  422. args: Any,
  423. kwargs: Any,
  424. ) -> Any: ...
  425. def _check_schema_allow_fake_script_object(
  426. schema: FunctionSchema,
  427. args: Any,
  428. kwargs: Any,
  429. ) -> _bool: ...
  430. def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ...
  431. def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ...
  432. def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ...
  433. def _jit_assert_is_instance(obj: Any, type: JitType): ...
  434. def _jit_clear_class_registry() -> None: ...
  435. def _jit_set_emit_hooks(
  436. ModuleHook: Optional[Callable],
  437. FunctionHook: Optional[Callable],
  438. ) -> None: ...
  439. def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ...
  440. def _load_for_lite_interpreter(
  441. filename: Union[str, Path],
  442. map_location: Optional[DeviceLikeType],
  443. ): ...
  444. def _load_for_lite_interpreter_from_buffer(
  445. buffer: BinaryIO,
  446. map_location: Optional[DeviceLikeType],
  447. ): ...
  448. def _export_operator_list(module: LiteScriptModule): ...
  449. def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ...
  450. def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ...
  451. def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ...
  452. def _backport_for_mobile(
  453. filename_input: Union[str, Path],
  454. filename_output: Union[str, Path],
  455. to_version: _int,
  456. ) -> None: ...
  457. def _backport_for_mobile_from_buffer(
  458. buffer: BinaryIO,
  459. filename_output: Union[str, Path],
  460. to_version: _int,
  461. ) -> None: ...
  462. def _backport_for_mobile_to_buffer(
  463. filename_input: Union[str, Path],
  464. to_version: _int,
  465. ) -> bytes: ...
  466. def _backport_for_mobile_from_buffer_to_buffer(
  467. buffer: BinaryIO,
  468. to_version: _int,
  469. ) -> bytes: ...
  470. def _get_model_ops_and_info(filename: Union[str, Path]): ...
  471. def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ...
  472. def _get_mobile_model_contained_types(filename: Union[str, Path]): ...
  473. def _get_mobile_model_contained_types_from_buffer(buffer: BinaryIO): ...
  474. def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ...
  475. def _get_graph_executor_optimize(optimize: Optional[_bool] = None) -> _bool: ...
  476. def _set_graph_executor_optimize(optimize: _bool): ...
  477. def _export_opnames(module: ScriptModule) -> List[str]: ...
  478. def _create_function_from_trace(
  479. qualname: str,
  480. func: Callable[..., Any],
  481. input_tuple: Tuple[Any, ...],
  482. var_lookup_fn: Callable[[Tensor], str],
  483. strict: _bool,
  484. force_outplace: _bool,
  485. argument_names: List[str],
  486. ) -> Tuple[Graph, Stack]: ...
  487. def _create_function_from_trace_with_dict(
  488. qualname: str,
  489. func: Callable[..., Any],
  490. input_dict: Dict[str, Any],
  491. var_lookup_fn: Callable[[Tensor], str],
  492. strict: _bool,
  493. force_outplace: _bool,
  494. argument_names: List[str],
  495. ) -> Tuple[Graph, Stack]: ...
  496. def _jit_is_script_object(obj: Any) -> _bool: ...
  497. def _last_executed_optimized_graph() -> Graph: ...
  498. def parse_type_comment(comment: str) -> Decl: ...
  499. def _get_upgraders_map_size() -> _int: ...
  500. def _get_upgraders_entry_map() -> Dict[str, str]: ...
  501. def _dump_upgraders_map() -> Dict[str, str]: ...
  502. def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ...
  503. def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ...
  504. def merge_type_from_type_comment(
  505. decl: Decl,
  506. type_annotation_decl: Decl,
  507. is_method: _bool,
  508. ) -> Decl: ...
  509. def parse_ir(input: str, parse_tensor_constants: _bool = False) -> Graph: ...
  510. def parse_schema(schema: str) -> FunctionSchema: ...
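# Usage sketch for schema parsing/inspection (illustrative only):
#
#   import torch
#   s = torch._C.parse_schema(
#       "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
#   )
#   print(s.name, s.overload_name)          # e.g. "aten::add" / "Tensor"
#   print([a.name for a in s.arguments])    # ["self", "other", "alpha"]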
  511. def get_device(input: Tensor) -> _int: ...
  512. def _resolve_type_from_object(
  513. obj: Any,
  514. range: SourceRange,
  515. rcb: ResolutionCallback,
  516. ) -> JitType: ...
  517. def _create_module_with_type(ty: JitType) -> ScriptModule: ...
  518. def _create_object_with_type(ty: ClassType) -> ScriptObject: ...
  519. def _run_emit_module_hook(m: ScriptModule): ...
  520. def _replace_overloaded_method_decl(
  521. overload_decl: Decl,
  522. implementation_def: Def,
  523. new_name: str,
  524. ) -> Def: ...
  525. def _jit_pass_lower_all_tuples(graph: Graph) -> None: ...
  526. def _jit_pass_onnx_set_dynamic_input_shape(
  527. graph: Graph,
  528. dynamic_axes: Dict[str, Dict[_int, str]],
  529. input_names: List[str],
  530. ) -> None: ...
  531. def _jit_pass_onnx_graph_shape_type_inference(
  532. graph: Graph,
  533. params_dict: Dict[str, IValue],
  534. opset_version: _int,
  535. ) -> None: ...
  536. def _jit_pass_onnx_assign_output_shape(
  537. graph: Graph,
  538. tensors: List[Tensor],
  539. desc: IODescriptor,
  540. onnx_shape_inference: _bool,
  541. is_script: _bool,
  542. opset_version: _int,
  543. ) -> None: ...
  544. def _jit_pass_onnx_remove_inplace_ops_for_onnx(
  545. graph: Graph,
  546. module: Optional[ScriptModule] = None,
  547. ) -> None: ...
  548. def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ...
  549. def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ...
  550. def _jit_pass_peephole(
  551. graph: Graph,
  552. disable_shape_peepholes: _bool = False,
  553. ) -> None: ...
  554. def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ...
  555. def _jit_pass_fuse_addmm(graph: Graph) -> None: ...
  556. def _jit_pass_onnx_preprocess(graph: Graph) -> None: ...
  557. def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ...
  558. def _jit_pass_onnx_remove_print(graph: Graph) -> None: ...
  559. def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ...
  560. def _jit_pass_onnx_unpack_quantized_weights(
  561. graph: Graph,
  562. paramsDict: Dict[str, IValue],
  563. caffe2: _bool,
  564. ) -> Dict[str, IValue]: ...
  565. def _jit_pass_onnx_quantization_insert_permutes(
  566. graph: Graph,
  567. paramsDict: Dict[str, IValue],
  568. ) -> Dict[str, IValue]: ...
  569. def _jit_pass_custom_pattern_based_rewrite_graph(
  570. pattern: str,
  571. fused_node_name: str,
  572. graph: Graph,
  573. ) -> None: ...
  574. def _jit_onnx_list_model_parameters(
  575. module: ScriptModule,
  576. ) -> Tuple[ScriptModule, List[IValue]]: ...
  577. def _jit_pass_erase_number_types(graph: Graph) -> None: ...
  578. def _jit_pass_onnx_lint(graph: Graph) -> None: ...
  579. def _jit_pass_onnx(
  580. graph: Graph,
  581. _jit_pass_onnx: _onnx.OperatorExportTypes,
  582. ) -> Graph: ...
  583. def _jit_pass_onnx_scalar_type_analysis(
  584. graph: Graph,
  585. lowprecision_cast: _bool,
  586. opset_version: _int,
  587. ) -> None: ...
  588. def _jit_pass_onnx_peephole(
  589. graph: Graph,
  590. opset_version: _int,
  591. fixed_batch_size: _bool,
  592. ) -> None: ...
  593. def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ...
  594. def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ...
  595. def _jit_pass_onnx_function_extraction(
  596. graph: Graph,
  597. module_names: Set[str],
  598. param_names: List[str],
  599. ) -> Dict[Node, Dict[str, str]]: ...
  600. def _jit_pass_onnx_clear_scope_records() -> None: ...
  601. def _jit_pass_onnx_track_scope_attributes(
  602. graph: Graph,
  603. onnx_attrs: Dict[str, Any],
  604. ) -> None: ...
  605. def _jit_is_onnx_log_enabled() -> _bool: ...
  606. def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ...
  607. def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ...
  608. def _jit_onnx_log(*args: Any) -> None: ...
  609. def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ...
  610. def _jit_pass_inline_fork_wait(graph: Graph) -> None: ...
  611. def _jit_pass_onnx_deduplicate_initializers(
  612. graph: Graph,
  613. params_dict: Dict[str, IValue],
  614. is_train: _bool,
  615. ) -> Dict[str, IValue]: ...
  616. def _jit_pass_onnx_eval_peephole(
  617. graph: Graph,
  618. paramsDict: Dict[str, IValue],
  619. ) -> Dict[str, IValue]: ...
  620. def _jit_pass_onnx_constant_fold(
  621. graph: Graph,
  622. paramsDict: Dict[str, IValue],
  623. opset_version: _int,
  624. ) -> Dict[str, IValue]: ...
  625. def _jit_pass_onnx_eliminate_unused_items(
  626. graph: Graph,
  627. paramsDict: Dict[str, IValue],
  628. ) -> Dict[str, IValue]: ...
  629. def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ...
  630. def _jit_pass_filter_non_tensor_arguments(
  631. params: Dict[str, IValue],
  632. ) -> Dict[str, Tensor]: ...
  633. def _jit_decay_packed_param_input_types(graph: Graph) -> None: ...
  634. def _jit_pass_onnx_node_shape_type_inference(
  635. n: Node,
  636. paramsDict: Dict[str, IValue],
  637. opset_version: _int,
  638. ) -> None: ...
  639. def _jit_onnx_convert_pattern_from_subblock(
  640. block: Block,
  641. n: Node,
  642. env: Dict[Value, Value],
  643. values_in_env: Set[Value],
  644. ) -> List[Value]: ...
  645. def _jit_pass_onnx_block(
  646. old_block: Block,
  647. new_block: Block,
  648. operator_export_type: _onnx.OperatorExportTypes,
  649. env: Dict[Value, Value],
  650. values_in_env: Set[Value],
  651. is_sub_block: _bool,
  652. ) -> Dict[Value, Value]: ...
  653. def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ...
  654. def _jit_pass_fixup_onnx_controlflow_node(
  655. n: Node,
  656. opset_version: _int,
  657. ) -> List[Value]: ...
  658. def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ...
  659. def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ...
  660. def _generate_upgraders_graph() -> Dict[str, Graph]: ...
  661. def _calculate_package_version_based_on_upgraders(val: _bool): ...
  662. def _get_version_calculator_flag() -> _bool: ...
  663. def _jit_script_interface_compile(
  664. name: str,
  665. class_def: ClassDef,
  666. rcb: ResolutionCallback,
  667. is_module: _bool,
  668. ): ...
  669. def _jit_script_compile_overload(
  670. qualname: str,
  671. overload_decl: Decl,
  672. implementation_def: Def,
  673. rcb: ResolutionCallback,
  674. implementation_defaults: Dict[str, Any],
  675. signature: Any,
  676. ): ...
  677. def _jit_script_compile(
  678. qual_name: str,
  679. definition: Def,
  680. rcb: ResolutionCallback,
  681. defaults: Dict[str, Any],
  682. ): ...
  683. def _jit_script_class_compile(
  684. qual_name: str,
  685. definition: ClassDef,
  686. defaults: Dict[str, Dict[str, Any]],
  687. rcb: ResolutionCallback,
  688. ): ...
  689. def _parse_source_def(src: str) -> Def: ...
  690. def import_ir_module(
  691. cu: CompilationUnit,
  692. filename: Union[str, Path],
  693. map_location: Optional[DeviceLikeType],
  694. extra_files: Dict[str, Any],
  695. ) -> ScriptModule: ...
  696. def import_ir_module_from_buffer(
  697. cu: CompilationUnit,
  698. buffer: BinaryIO,
  699. map_location: Optional[DeviceLikeType],
  700. extra_files: Dict[str, Any],
  701. ) -> ScriptModule: ...
  702. def _import_ir_module_from_package(
  703. cu: CompilationUnit,
  704. reader: PyTorchFileReader,
  705. storage_context: DeserializationStorageContext,
  706. map_location: Optional[DeviceLikeType],
  707. ts_id: str,
  708. ) -> ScriptModule: ...
  709. def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ...
  710. def _check_onnx_proto(proto: str) -> None: ...
  711. def _propagate_and_assign_input_shapes(
  712. graph: Graph,
  713. inputs: Tuple[Tensor, ...],
  714. param_count_list: List[_int],
  715. with_grad: _bool,
  716. propagate: _bool,
  717. ) -> Graph: ...
  718. # Defined in torch/csrc/jit/runtime/graph_executor.h
  719. class GraphExecutorState: ...
720. # Defined in torch/csrc/jit/ir/alias_analysis.h
  721. class AliasDb:
  722. def __str__(self) -> str: ...
  723. class _InsertPoint:
  724. def __enter__(self) -> None: ...
  725. def __exit__(self, *args) -> None: ...
  726. # Defined in torch/csrc/jit/ir/ir.h
  727. class Use:
  728. @property
  729. def user(self) -> Node: ...
  730. @property
  731. def offset(self) -> _int: ...
  732. def isAfter(self, other: Use) -> _bool: ...
  733. # Defined in torch/csrc/jit/ir/ir.h
  734. class Value:
  735. def type(self) -> JitType: ...
  736. def setType(self, t: JitType) -> Value: ...
  737. def setTypeAs(self, other: Value) -> Value: ...
  738. def inferTypeFrom(self, t: Tensor) -> None: ...
  739. def debugName(self) -> str: ...
  740. def setDebugName(self, name: str) -> None: ...
  741. def unique(self) -> _int: ...
  742. def offset(self) -> _int: ...
  743. def node(self) -> Node: ...
  744. def uses(self) -> List[Use]: ...
  745. def replaceAllUsesWith(self, val: Value) -> None: ...
  746. def replaceAllUsesAfterNodeWith(self, node: Node, val: Value) -> None: ...
  747. def requires_grad(self) -> _bool: ...
  748. def requiresGrad(self) -> _bool: ...
  749. def copyMetadata(self, other: Value) -> Value: ...
  750. def isCompleteTensor(self) -> _bool: ...
  751. def toIValue(self) -> IValue: ...
  752. # Defined in torch/csrc/jit/ir/ir.h
  753. class Block:
  754. def inputs(self) -> Iterator[Value]: ...
  755. def outputs(self) -> Iterator[Value]: ...
  756. def nodes(self) -> Iterator[Node]: ...
  757. def paramNode(self) -> Node: ...
  758. def returnNode(self) -> Node: ...
  759. def owningNode(self) -> Node: ...
  760. def registerOutput(self, n: Value) -> _int: ...
  761. def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ...
  762. # Defined in torch/csrc/jit/ir/ir.h
  763. class Node:
  764. def __getitem__(self, key: str) -> Any: ...
  765. def schema(self) -> str: ...
  766. def input(self) -> Value: ...
  767. def inputs(self) -> Iterator[Value]: ...
  768. def inputsAt(self, idx: _int) -> Value: ...
  769. def inputsSize(self) -> _int: ...
  770. def output(self) -> Value: ...
  771. def outputs(self) -> Iterator[Value]: ...
  772. def outputsAt(self, idx: _int) -> Value: ...
  773. def outputsSize(self) -> _int: ...
  774. def hasMultipleOutputs(self) -> _bool: ...
  775. def blocks(self) -> List[Block]: ...
  776. def addBlock(self) -> Block: ...
  777. def mustBeNone(self) -> _bool: ...
  778. def matches(self, pattern: str) -> _bool: ...
  779. def kind(self) -> str: ...
  780. def kindOf(self, name: str) -> str: ...
  781. def addInput(self, name: str) -> Value: ...
  782. def replaceInput(self, i: _int, newValue: Value) -> Value: ...
  783. def replaceInputWith(self, from_: Value, to: Value) -> None: ...
  784. def replaceAllUsesWith(self, n: Node) -> None: ...
  785. def insertBefore(self, n: Node) -> Node: ...
  786. def insertAfter(self, n: Node) -> Node: ...
  787. def isBefore(self, n: Node) -> _bool: ...
  788. def isAfter(self, n: Node) -> _bool: ...
  789. def moveBefore(self, n: Node) -> None: ...
  790. def moveAfter(self, n: Node) -> None: ...
  791. def removeInput(self, i: _int) -> None: ...
  792. def removeAllInputs(self, i: _int) -> None: ...
  793. def hasUses(self) -> _bool: ...
  794. def eraseOutput(self, i: _int) -> None: ...
  795. def addOutput(self) -> Value: ...
  796. def scopeName(self) -> str: ...
  797. def isNondeterministic(self) -> _bool: ...
  798. def copyAttributes(self, rhs: Node) -> Node: ...
  799. def copyMetadata(self, rhs: Node) -> Node: ...
  800. def hasAttributes(self) -> _bool: ...
  801. def hasAttribute(self, name: str) -> _bool: ...
  802. def removeAttribute(self, attr: str) -> Node: ...
  803. def namedInput(self, name: str) -> Value: ...
  804. def sourceRange(self) -> SourceRange: ...
  805. def owningBlock(self) -> Block: ...
  806. def findNode(self, kind: str, recurse: _bool = True) -> Node: ...
  807. def findAllNodes(self, kind: str, recurse: _bool = True) -> List[Node]: ...
  808. def getModuleHierarchy(self) -> str: ...
  809. def prev(self) -> Node: ...
  810. def destroy(self) -> None: ...
  811. def attributeNames(self) -> List[str]: ...
  812. # Accessors for attributes as types.
  813. def f(self, name: str) -> _float: ...
  814. def f_(self, name: str, val: _float) -> Node: ...
  815. def fs(self, name: str) -> List[_float]: ...
  816. def fs_(self, name: str, val: List[_float]) -> Node: ...
  817. def c(self, name: str) -> complex: ...
  818. def c_(self, name: str, val: complex) -> Node: ...
  819. def s(self, name: str) -> str: ...
  820. def s_(self, name: str, val: str) -> Node: ...
  821. def ss(self, name: str) -> List[str]: ...
  822. def ss_(self, name: str, val: List[str]) -> Node: ...
  823. def i(self, name: str) -> _int: ...
  824. def i_(self, name: str, val: _int) -> Node: ...
  825. # Cannot define "is" like this because it's a reserved keyword in python.
  826. # def is(self, name: str) -> List[_int]: ...
  827. # def is_(self, name: str, val: List[_int]) -> Node: ...
  828. def g(self, name: str) -> Graph: ...
  829. def g_(self, name: str, val: Graph) -> Node: ...
  830. def gs(self, name: str) -> List[Graph]: ...
  831. def gs_(self, name: str, val: List[Graph]) -> Node: ...
  832. def ival(self, name: str) -> IValue: ...
  833. def ival_(self, name: str, val: IValue) -> Node: ...
  834. def t(self, name: str) -> Tensor: ...
  835. def t_(self, name: str, val: Tensor) -> Node: ...
  836. def ts(self, name: str) -> List[Tensor]: ...
  837. def ts_(self, name: str, val: List[Tensor]) -> Node: ...
  838. def ty(self, name: str) -> JitType: ...
  839. def ty_(self, name: str, val: JitType) -> Node: ...
  840. def tys(self, name: str) -> List[JitType]: ...
  841. def tys_(self, name: str, val: List[JitType]) -> Node: ...
842. # Defined in torch/csrc/jit/ir/ir.h
  843. class Graph:
  844. def inputs(self) -> Iterator[Value]: ...
  845. def outputs(self) -> Iterator[Value]: ...
  846. def nodes(self) -> Iterator[Node]: ...
  847. def param_node(self) -> Node: ...
  848. def return_node(self) -> Node: ...
  849. def addInput(self, name: str = "") -> Value: ...
  850. def eraseInput(self, i: _int) -> None: ...
  851. def registerOutput(self, n: Value) -> _int: ...
  852. def eraseOutput(self, i: _int) -> None: ...
  853. def create(self, name: str, args, num_outputs: _int) -> Node: ...
  854. def appendNode(self, n: Node) -> Node: ...
  855. def prependNode(self, n: Node) -> Node: ...
  856. def insertNode(self, n: Node) -> Node: ...
  857. def block(self) -> Block: ...
  858. def lint(self) -> None: ...
  859. def alias_db(self) -> AliasDb: ...
  860. def setInsertPoint(self, n: Union[Block, Node]) -> None: ...
  861. def insert_point_guard(self, n: Union[Block, Node]) -> _InsertPoint: ...
  862. def insertPoint(self) -> Node: ...
  863. def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ...
  864. def makeMultiOutputIntoTuple(self) -> None: ...
  865. def copy(self) -> Graph: ...
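# Usage sketch for IR inspection with the Graph/Node/Value classes above (illustrative only):
#
#   import torch
#
#   @torch.jit.script
#   def f(x: torch.Tensor) -> torch.Tensor:
#       return torch.relu(x) * 2
#
#   g = f.graph                                   # a Graph, as declared above
#   for node in g.nodes():
#       print(node.kind(), [o.debugName() for o in node.outputs()])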
866. # Defined in aten/src/ATen/core/alias_info.h
  867. class AliasInfo:
  868. is_write: _bool
  869. before_set: Set[str]
  870. after_set: Set[str]
871. # Defined in aten/src/ATen/core/function_schema.h
  872. class Argument:
  873. name: str
  874. type: JitType
  875. default_value: Optional[Any]
  876. def has_default_value(self) -> _bool: ...
  877. kwarg_only: _bool
  878. is_out: _bool
  879. alias_info: Optional[AliasInfo]
  880. class FunctionSchema:
  881. arguments: List[Argument]
  882. returns: List[Argument]
  883. name: str
  884. overload_name: str
  885. is_mutable: _bool
  886. class _UpgraderEntry:
  887. bumped_at_version: _int
  888. upgrader_name: str
  889. old_schema: str
  890. def __init__(
  891. self,
  892. bumped_at_version: _int,
  893. upgrader_name: str,
  894. old_schema: str,
  895. ) -> None: ...
  896. class _UpgraderRange:
  897. min_version: _int
  898. max_version: _int
  899. def _get_max_operator_version() -> _int: ...
  900. def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ...
  901. def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ...
  902. def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ...
  903. def _test_only_remove_entry_to_op_version(op_name: str) -> None: ...
  904. # Defined in torch/csrc/jit/python/script_init.cpp
  905. class ScriptModuleSerializer:
  906. def __init__(self, export_writer: PyTorchFileWriter) -> None: ...
  907. def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ...
  908. def write_files(self) -> None: ...
  909. def storage_context(self) -> SerializationStorageContext: ...
  910. # Defined in torch/csrc/jit/python/script_init.cpp
  911. class SerializationStorageContext:
  912. def __init__(self) -> None: ...
  913. def has_storage(self, storage: Storage) -> _bool: ...
  914. def get_or_add_storage(self, storage: Storage) -> _int: ...
  915. # Defined in torch/csrc/jit/python/script_init.cpp
  916. class DeserializationStorageContext:
  917. def __init__(self) -> None: ...
  918. def get_storage(self, name: str, dtype: _dtype) -> Tensor: ...
  919. def has_storage(self, name: str) -> _bool: ...
  920. def add_storage(self, name: str, tensor: Tensor) -> _int: ...
  921. # Defined in torch/csrc/jit/python/script_init.cpp
  922. class ConcreteModuleTypeBuilder:
  923. def __init__(self, obj: Any) -> None: ...
  924. def set_module_dict(self): ...
  925. def set_module_list(self): ...
  926. def set_parameter_list(self): ...
  927. def set_parameter_dict(self): ...
  928. def add_attribute(
  929. self,
  930. name: str,
  931. ty: JitType,
  932. is_param: _bool,
  933. is_buffer: _bool,
  934. ): ...
  935. def add_module(self, name: str, meta: ConcreteModuleType): ...
  936. def add_constant(self, name: str, value: Any): ...
  937. def add_overload(self, method_name: str, overloaded_method_names: List[str]): ...
  938. def add_builtin_function(self, name: str, symbol_name: str): ...
  939. def add_failed_attribute(self, name: str, failure_reason: str): ...
  940. def add_function_attribute(
  941. self,
  942. name: str,
  943. ty: JitType,
  944. func: Callable[..., Any],
  945. ): ...
  946. def add_ignored_attribute(self, name: str): ...
  947. def add_ignored_attributes(self, names: List[str]): ...
  948. def add_forward_hook(self, hook: Callable[..., Any]): ...
  949. def add_forward_pre_hook(self, pre_hook: Callable[..., Any]): ...
  950. class ConcreteModuleType:
  951. def get_constants(self) -> Dict[str, Any]: ...
  952. def equals(self, other: ConcreteModuleType) -> _bool: ...
  953. @staticmethod
  954. def from_jit_type(ty: JitType) -> ConcreteModuleType: ...
  955. class CallStack:
  956. def __init__(self, name: str, range: SourceRange): ...
  957. class ErrorReport:
  958. def __init__(self, range: SourceRange) -> None: ...
  959. def what(self) -> str: ...
  960. @staticmethod
  961. def call_stack() -> str: ...
  962. class CompilationUnit:
  963. def __init__(self, lang: str = ..., _frames_up: _int = ...) -> None: ...
  964. def find_function(self, name: str) -> ScriptFunction: ...
  965. def __getattr__(self, name: str) -> ScriptFunction: ...
  966. def define(
  967. self,
  968. script: str,
  969. rcb: ResolutionCallback = ...,
  970. _frames_up: _int = ...,
  971. ): ...
  972. def get_interface(self, name: str) -> InterfaceType: ...
  973. def get_functions(self) -> List[ScriptFunction]: ...
  974. def create_function(
  975. self,
  976. name: str,
  977. graph: Graph,
  978. shouldMangle: _bool = ...,
  979. ) -> ScriptFunction: ...
  980. def get_class(self, name: str) -> ClassType: ...
  981. class ScriptObject:
  982. def setattr(self, name: str, value: Any): ...
  983. class ScriptModule(ScriptObject):
  984. def _method_names(self) -> List[str]: ...
  985. def _get_method(self, name: str) -> ScriptMethod: ...
  986. class LiteScriptModule:
  987. def __call__(self, *input): ...
  988. def find_method(self, method_name: str): ...
  989. def forward(self, *input) -> List[str]: ...
  990. def run_method(self, method_name: str, *input): ...
  991. # NOTE: switch to collections.abc.Callable in python 3.9
  992. class ScriptFunction(Generic[P, ReturnVal]):
  993. def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ...
  994. def save(self, filename: str, _extra_files: Dict[str, bytes]) -> None: ...
  995. def save_to_buffer(self, _extra_files: Dict[str, bytes]) -> bytes: ...
  996. @property
  997. def graph(self) -> Graph: ...
  998. def inlined_graph(self) -> Graph: ...
  999. def schema(self) -> FunctionSchema: ...
  1000. def code(self) -> str: ...
  1001. def name(self) -> str: ...
  1002. @property
  1003. def qualified_name(self) -> str: ...
  1004. # NOTE: switch to collections.abc.Callable in python 3.9
  1005. class ScriptMethod(Generic[P, ReturnVal]):
  1006. graph: Graph
  1007. def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ...
  1008. @property
  1009. def owner(self) -> ScriptModule: ...
  1010. @property
  1011. def name(self) -> str: ...
  1012. class ScriptDict(Generic[K, T]):
  1013. def __init__(self, dict: Dict[K, T]) -> None: ...
  1014. def __len__(self) -> _int: ...
  1015. def __contains__(self, key: K) -> _bool: ...
  1016. def __getitem__(self, key: K) -> T: ...
  1017. def __setitem__(self, key: K, value: T) -> None: ...
  1018. def __delitem__(self, key: K) -> None: ...
  1019. def __iter__(self) -> Iterator[K]: ...
  1020. def items(self) -> Iterator[tuple[K, T]]: ...
  1021. def keys(self) -> Iterator[K]: ...
  1022. class ScriptList(Generic[T]):
  1023. def __init__(self, list: List[T]) -> None: ...
  1024. def __len__(self) -> _int: ...
  1025. def __contains__(self, item: T) -> _bool: ...
  1026. @overload
  1027. def __getitem__(self, idx: _int) -> T: ...
  1028. @overload
  1029. def __getitem__(self, idx: slice) -> ScriptList[T]: ...
  1030. @overload
  1031. def __setitem__(self, idx: _int, value: T) -> None: ...
  1032. @overload
  1033. def __setitem__(self, idx: slice, value: List[T]) -> None: ...
  1034. def __delitem__(self, idx: _int) -> None: ...
  1035. def __iter__(self) -> Iterator[T]: ...
  1036. def count(self, value: T) -> _int: ...
  1037. def remove(self, value: T) -> None: ...
  1038. def append(self, value: T) -> None: ...
  1039. def clear(self) -> None: ...
  1040. @overload
  1041. def extend(self, values: List[T]) -> None: ...
  1042. @overload
  1043. def extend(self, values: Iterable[T]) -> None: ...
  1044. @overload
  1045. def pop(self) -> T: ...
  1046. @overload
  1047. def pop(self, idx: _int) -> T: ...
  1048. class ModuleDict:
  1049. def __init__(self, mod: ScriptModule) -> None: ...
  1050. def items(self) -> List[Tuple[str, Any]]: ...
  1051. class ParameterDict:
  1052. def __init__(self, mod: ScriptModule) -> None: ...
  1053. class BufferDict:
  1054. def __init__(self, mod: ScriptModule) -> None: ...
  1055. # Defined in torch/csrc/jit/api/module.h
  1056. class Module: ...
  1057. # Defined in torch/csrc/Module.cpp
  1058. def _initExtension(shm_manager_path: str) -> None: ... # THPModule_initExtension
  1059. def _autograd_init() -> _bool: ... # THPAutograd_initExtension
  1060. def _add_docstr(obj: T, doc_obj: str) -> T: ... # THPModule_addDocStr
  1061. def _init_names(arg: Sequence[Type]) -> None: ... # THPModule_initNames
  1062. def _has_distributed() -> _bool: ... # THPModule_hasDistributed
  1063. def _set_default_tensor_type(type) -> None: ... # THPModule_setDefaultTensorType
  1064. def _set_default_dtype(d: _dtype) -> None: ... # THPModule_setDefaultDtype
  1065. def _infer_size(arg1: Size, arg2: Size) -> Size: ... # THPModule_inferSize
  1066. def _crash_if_csrc_asan() -> _int: ... # THPModule_crashIfCsrcASAN
  1067. def _crash_if_csrc_ubsan() -> _int: ... # THPModule_crashIfCsrcUBSAN
  1068. def _crash_if_aten_asan() -> _int: ... # THPModule_crashIfATenASAN
  1069. def _show_config() -> str: ... # THPModule_showConfig
  1070. def _cxx_flags() -> str: ... # THPModule_cxxFlags
  1071. def _parallel_info() -> str: ... # THPModule_parallelInfo
  1072. def _get_cpu_capability() -> str: ... # THPModule_getCpuCapability
  1073. def _set_backcompat_broadcast_warn(
  1074. arg: _bool,
  1075. ) -> None: ... # THPModule_setBackcompatBroadcastWarn
  1076. def _get_backcompat_broadcast_warn() -> _bool: ... # THPModule_getBackcompatBroadcastWarn
  1077. def _set_backcompat_keepdim_warn(
  1078. arg: _bool,
  1079. ) -> None: ... # THPModule_setBackcompatKeepdimWarn
  1080. def _get_backcompat_keepdim_warn() -> _bool: ... # THPModule_getBackcompatKeepdimWarn
1081. def get_num_threads() -> _int: ... # THPModule_getNumThreads
  1082. def set_num_threads(nthreads: _int) -> None: ... # THPModule_setNumThreads
  1083. def get_num_interop_threads() -> _int: ... # THPModule_getNumInteropThreads
  1084. def set_num_interop_threads(
  1085. nthreads: _int,
  1086. ) -> None: ... # THPModule_setNumInteropThreads
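# Public wrappers for the thread-count bindings above (illustrative only):
#
#   import torch
#   torch.set_num_threads(4)            # intra-op parallelism
#   torch.set_num_interop_threads(2)    # inter-op parallelism; call before any parallel work starts
#   print(torch.get_num_threads(), torch.get_num_interop_threads())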
  1087. def _get_cudnn_enabled() -> _bool: ... # THPModule_userEnabledCuDNN
  1088. def _set_cudnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledCuDNN
  1089. def _get_flash_sdp_enabled() -> _bool: ... # THPModule_userEnabledFusedSDP
  1090. def _set_sdp_use_flash(arg: _bool) -> None: ... # THPModule_setSDPUseFlash
  1091. def _get_mem_efficient_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP
  1092. def _set_sdp_use_mem_efficient(
  1093. arg: _bool,
  1094. ) -> None: ... # THPModule_setSDPUseMemEfficient
  1095. def _get_math_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP
  1096. def _set_sdp_use_math(arg: _bool) -> None: ... # THPModule_setSDPUseMath
  1097. def _get_cudnn_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP
  1098. def _set_sdp_use_cudnn(arg: _bool) -> None: ... # THPModule_setSDPUseMath
  1099. def _get_mkldnn_enabled() -> _bool: ... # THPModule_userEnabledMkldnn
  1100. def _set_mkldnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledMkldnn
  1101. def _get_cudnn_benchmark() -> _bool: ... # THPModule_benchmarkCuDNN
  1102. def _set_cudnn_benchmark(arg: _bool) -> None: ... # THPModule_setBenchmarkCuDNN
  1103. def _get_cudnn_deterministic() -> _bool: ... # THPModule_deterministicCuDNN
  1104. def _set_cudnn_deterministic(arg: _bool) -> None: ... # THPModule_setDeterministicCuDNN
  1105. def _get_deterministic_algorithms() -> _bool: ... # THPModule_deterministicAlgorithms
  1106. def _get_deterministic_algorithms_warn_only() -> _bool: ... # THPModule_deterministicAlgorithmsWarnOnly
  1107. def _set_deterministic_algorithms(
  1108. mode: _bool,
  1109. *,
  1110. warn_only: _bool = ...,
  1111. ) -> None: ... # THPModule_setDeterministicAlgorithms
  1112. def _get_deterministic_fill_uninitialized_memory() -> _bool: ... # THPModule_deterministicFillUninitializedMemory
  1113. def _set_deterministic_fill_uninitialized_memory(arg: _bool) -> None: ... # THPModule_setDeterministicFillUninitializedMemory
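# Public wrapper for the deterministic-algorithm switches above (torch.use_deterministic_algorithms):
#
#   import torch
#   torch.use_deterministic_algorithms(True, warn_only=True)
#   assert torch.are_deterministic_algorithms_enabled()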
  1114. def _get_nnpack_enabled() -> _bool: ... # THPModule_userEnabledNNPACK
  1115. def _set_nnpack_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledNNPACK
  1116. def _get_warnAlways() -> _bool: ... # THPModule_warnAlways
  1117. def _set_warnAlways(arg: _bool) -> None: ... # THPModule_setWarnAlways
  1118. def _get_cudnn_allow_tf32() -> _bool: ... # THPModule_allowTF32CuDNN
  1119. def _set_cudnn_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuDNN
  1120. def _get_cublas_allow_tf32() -> _bool: ... # THPModule_allowTF32CuBLAS
  1121. def _set_cublas_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuBLAS
  1122. def _get_float32_matmul_precision() -> str: ... # THPModule_float32MatmulPrecision
  1123. def _set_float32_matmul_precision(
  1124. arg: str,
  1125. ) -> None: ... # THPModule_setFloat32MatmulPrecision
  1126. def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... # THPModule_allowFP16ReductionCuBLAS
  1127. def _set_cublas_allow_fp16_reduced_precision_reduction(
  1128. arg: _bool,
  1129. ) -> None: ... # THPModule_setAllowFP16ReductionCuBLAS
  1130. def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... # THPModule_allowBF16ReductionCuBLAS
  1131. def _set_cublas_allow_bf16_reduced_precision_reduction(
  1132. arg: _bool,
  1133. ) -> None: ... # THPModule_setAllowBF16ReductionCuBLAS
  1134. def _set_conj(x: Tensor, conj: _bool) -> None: ...
  1135. def _set_neg(x: Tensor, neg: _bool) -> None: ...
  1136. def _set_meta_in_tls_dispatch_include(meta_in_tls: _bool) -> None: ...
  1137. def _meta_in_tls_dispatch_include() -> _bool: ...
  1138. def _stash_obj_in_tls(key: str, arg: Any) -> None: ...
  1139. def _get_obj_in_tls(key: str) -> Any: ...
  1140. def _is_key_in_tls(key: str) -> _bool: ...
  1141. def _select_batch_norm_backend(*args, **kwargs) -> BatchNormBackend: ...
  1142. def _select_conv_backend(*args, **kwargs) -> ConvBackend: ...
  1143. def _conv_determine_backend_memory_format(
  1144. input: Tensor,
  1145. weight: Tensor,
  1146. backend: ConvBackend,
  1147. ) -> memory_format: ...
  1148. def _has_storage(x: Tensor) -> _bool: ...
  1149. def _construct_storage_from_data_pointer(data_ptr: _int, device: torch.device, size: _int) -> Storage: ...
  1150. def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ...
  1151. def _group_tensors_by_device_and_dtype(nested_tensorlists: List[List[Optional[Tensor]]], with_indices: _bool = False) -> Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Optional[Tensor]]], List[_int]]]: ...
  1152. # NB: There is no Capsule type in typing, see
  1153. # https://code.activestate.com/lists/python-dev/139675/
  1154. def _to_dlpack(data: Tensor) -> Any: ... # THPModule_toDLPack
  1155. def _from_dlpack(data: Any) -> Tensor: ... # THPModule_fromDLPack
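# Usage sketch for the DLPack bindings above (public wrappers live in torch.utils.dlpack):
#
#   import torch
#   from torch.utils.dlpack import to_dlpack, from_dlpack
#
#   t = torch.arange(4)
#   capsule = to_dlpack(t)       # opaque PyCapsule, hence the `Any` annotations above
#   t2 = from_dlpack(capsule)    # shares memory with t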
  1156. def _get_cpp_backtrace(
  1157. frames_to_skip: _int,
  1158. maximum_number_of_frames: _int,
  1159. ) -> str: ... # THPModule_getCppBacktrace
  1160. def set_flush_denormal(arg: _bool) -> _bool: ... # THPModule_setFlushDenormal
  1161. def get_default_dtype() -> _dtype: ... # THPModule_getDefaultDtype
  1162. def _get_default_device() -> str: ... # THPModule_getDefaultDevice
  1163. def _get_qengine() -> _int: ... # THPModule_qEngine
  1164. def _set_qengine(qengine: _int) -> None: ... # THPModule_setQEngine
  1165. def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines
  1166. def _is_xnnpack_enabled() -> _bool: ... # THPModule_isEnabledXNNPACK
  1167. def _check_sparse_tensor_invariants() -> _bool: ... # THPModule_checkSparseTensorInvariants
  1168. def _set_check_sparse_tensor_invariants(
  1169. arg: _bool,
  1170. ) -> None: ... # THPModule_setCheckSparseTensorInvariants
  1171. def _set_default_mobile_cpu_allocator() -> None: ... # THPModule_setDefaultMobileCPUAllocator
  1172. def _unset_default_mobile_cpu_allocator() -> None: ... # THPModule_unsetDefaultMobileCPUAllocator
  1173. def _is_torch_function_enabled() -> _bool: ... # THPModule_isEnabledTorchFunction
  1174. def _has_torch_function(
  1175. args: Iterable[Any],
  1176. ) -> _bool: ... # THPModule_has_torch_function
  1177. def _has_torch_function_unary(Any) -> _bool: ... # THPModule_has_torch_function_unary
  1178. def _has_torch_function_variadic(
  1179. *args: Any,
  1180. ) -> _bool: ... # THPModule_has_torch_function_variadic
  1181. def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting
  1182. def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting
  1183. def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython
  1184. def _log_api_usage_metadata(event: str, metadata_map: Dict[str, str]) -> None: ... # LogAPIUsageMetadataFromPython
  1185. def _demangle(str) -> str: ... # c10::demangle
  1186. def _disabled_torch_function_impl(
  1187. func: Callable,
  1188. types: Iterable[Type],
  1189. args: Tuple,
  1190. kwargs: Dict,
  1191. ) -> Any: ... # THPModule_disable_torch_function
  1192. def _disabled_torch_dispatch_impl(
  1193. func: Callable,
  1194. types: Iterable[Type],
  1195. args: Tuple,
  1196. kwargs: Dict,
  1197. ) -> Any: ... # THPModule_disable_dispatch_function
  1198. def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ...
  1199. def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ...
  1200. class _LinalgBackend:
  1201. Default: _LinalgBackend
  1202. Cusolver: _LinalgBackend
  1203. Magma: _LinalgBackend
  1204. class BatchNormBackend(Enum): ...
  1205. def _get_blas_preferred_backend() -> torch._C._BlasBackend: ...
  1206. def _set_blas_preferred_backend(arg: torch._C._BlasBackend): ...
  1207. class _BlasBackend:
  1208. Cublas: _BlasBackend
  1209. Cublaslt: _BlasBackend
  1210. class ConvBackend(Enum): ...
  1211. class Tag(Enum):
  1212. core: _int = 0
  1213. data_dependent_output: _int = 1
  1214. dynamic_output_shape: _int = 2
  1215. generated: _int = 3
  1216. inplace_view: _int = 4
  1217. needs_fixed_stride_order: _int = 5
  1218. nondeterministic_bitwise: _int = 6
  1219. nondeterministic_seeded: _int = 7
  1220. pointwise: _int = 8
  1221. pt2_compliant_tag: _int = 9
  1222. view_copy: _int = 10
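# Usage sketch: operator overloads expose their tags as torch.Tag values (illustrative only):
#
#   import torch
#   op = torch.ops.aten.add.Tensor
#   print(op.tags)    # e.g. [torch.Tag.core, torch.Tag.pointwise]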
  1223. # Defined in `valgrind.h` and `callgrind.h` respectively.
  1224. def _valgrind_supported_platform() -> _bool: ... # NVALGRIND
  1225. def _valgrind_toggle() -> None: ... # CALLGRIND_TOGGLE_COLLECT
  1226. def _valgrind_toggle_and_dump_stats() -> None: ... # CALLGRIND_TOGGLE_COLLECT and CALLGRIND_DUMP_STATS
  1227. has_openmp: _bool
  1228. has_mkl: _bool
  1229. _has_mps: _bool
  1230. has_lapack: _bool
  1231. _has_cuda: _bool
  1232. _has_magma: _bool
  1233. _has_xpu: _bool
  1234. _has_mkldnn: _bool
  1235. _has_cudnn: _bool
  1236. has_spectral: _bool
  1237. _GLIBCXX_USE_CXX11_ABI: _bool
  1238. default_generator: Generator
  1239. # Defined in torch/csrc/autograd/init.cpp
  1240. def _set_grad_enabled(enabled: _bool) -> None: ...
  1241. def is_grad_enabled() -> _bool: ...
  1242. def _set_fwd_grad_enabled(enabled: _bool) -> None: ...
  1243. def _is_fwd_grad_enabled() -> _bool: ...
  1244. def is_inference_mode_enabled() -> _bool: ...
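# Public context managers backed by the grad-mode bindings above (illustrative only):
#
#   import torch
#   with torch.no_grad():
#       assert not torch.is_grad_enabled()
#   with torch.inference_mode():
#       assert torch.is_inference_mode_enabled()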
  1245. @overload
  1246. def set_autocast_enabled(device_type: str, enabled: _bool) -> None: ...
  1247. @overload
  1248. def set_autocast_enabled(enabled: _bool) -> None: ...
  1249. @overload
  1250. def is_autocast_enabled(device_type: str) -> _bool: ...
  1251. @overload
  1252. def is_autocast_enabled() -> _bool: ...
  1253. def set_autocast_dtype(device_type: str, dtype: _dtype) -> None: ...
  1254. def get_autocast_dtype(device_type: str) -> _dtype: ...
  1255. def clear_autocast_cache() -> None: ...
  1256. def set_autocast_cpu_enabled(enabled: _bool) -> None: ...
  1257. def is_autocast_cpu_enabled() -> _bool: ...
  1258. def _is_any_autocast_enabled() -> _bool: ...
  1259. def _is_autocast_available(device_type: str) -> _bool: ...
  1260. def set_autocast_cpu_dtype(dtype: _dtype) -> None: ...
  1261. def set_autocast_gpu_dtype(dtype: _dtype) -> None: ...
  1262. def get_autocast_cpu_dtype() -> _dtype: ...
  1263. def get_autocast_gpu_dtype() -> _dtype: ...
  1264. def autocast_increment_nesting() -> _int: ...
  1265. def autocast_decrement_nesting() -> _int: ...
  1266. def is_autocast_cache_enabled() -> _bool: ...
  1267. def set_autocast_cache_enabled(enabled: _bool) -> None: ...
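# Public wrapper for the autocast state bindings above (torch.autocast); model and inp are placeholders:
#
#   import torch
#   with torch.autocast(device_type="cuda", dtype=torch.float16):
#       out = model(inp)    # eligible ops run in float16 inside this block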
  1268. def _increment_version(tensor: Tensor) -> None: ...
  1269. def set_anomaly_enabled(enabled: _bool, check_nan: _bool = True) -> None: ...
  1270. def is_anomaly_enabled() -> _bool: ...
  1271. def is_anomaly_check_nan_enabled() -> _bool: ...
  1272. def _is_multithreading_enabled() -> _bool: ...
  1273. def _set_multithreading_enabled(enabled: _bool) -> None: ...
  1274. def _set_view_replay_enabled(enabled: _bool) -> None: ...
  1275. def _is_view_replay_enabled() -> _bool: ...
  1276. def _enter_dual_level() -> _int: ...
  1277. def _exit_dual_level(level: _int) -> None: ...
  1278. def _make_dual(tensor: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
  1279. def _unpack_dual(tensor: Tensor, level: _int) -> Tensor: ...
  1280. def __set_forward_AD_enabled(enabled: _bool) -> None: ...
  1281. def __is_forward_AD_enabled() -> _bool: ...
  1282. def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ...
  1283. def _reset_default_hooks() -> None: ...
  1284. def _is_torch_function_mode_enabled() -> _bool: ...
  1285. def _set_torch_function_mode(cls: Any) -> None: ...
  1286. def _push_on_torch_function_stack(cls: Any) -> None: ...
  1287. def _pop_torch_function_stack() -> Any: ...
  1288. def _get_function_stack_at(idx: _int) -> Any: ...
  1289. def _len_torch_function_stack() -> _int: ...
  1290. def _set_torch_dispatch_mode(cls: Any) -> None: ...
  1291. def _push_on_torch_dispatch_stack(cls: TorchDispatchMode) -> None: ...
  1292. def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ...
  1293. def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ...
  1294. def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Optional[TorchDispatchMode]: ...
  1295. def _set_dispatch_mode(mode: TorchDispatchMode) -> None: ...
  1296. def _get_dispatch_stack_at(idx: _int) -> Any: ...
  1297. def _len_torch_dispatch_stack() -> _int: ...
  1298. def _activate_gpu_trace() -> None: ...
  1299. class _DisableTorchDispatch:
  1300. def __init__(self): ...
  1301. def __enter__(self): ...
  1302. def __exit__(self, exc_type, exc_value, traceback): ...
  1303. class _EnableTorchFunction:
  1304. def __init__(self): ...
  1305. def __enter__(self): ...
  1306. def __exit__(self, exc_type, exc_value, traceback): ...
  1307. class _EnablePythonDispatcher:
  1308. def __init__(self): ...
  1309. def __enter__(self): ...
  1310. def __exit__(self, exc_type, exc_value, traceback): ...
  1311. class _DisablePythonDispatcher:
  1312. def __init__(self): ...
  1313. def __enter__(self): ...
  1314. def __exit__(self, exc_type, exc_value, traceback): ...
  1315. class _EnablePreDispatch:
  1316. def __init__(self): ...
  1317. def __enter__(self): ...
  1318. def __exit__(self, exc_type, exc_value, traceback): ...
  1319. class _DisableFuncTorch:
  1320. def __init__(self): ...
  1321. def __enter__(self): ...
  1322. def __exit__(self, exc_type, exc_value, traceback): ...
  1323. class _DisableAutocast:
  1324. def __init__(self): ...
  1325. def __enter__(self): ...
  1326. def __exit__(self, exc_type, exc_value, traceback): ...
  1327. class _InferenceMode:
  1328. def __init__(self, enabled: _bool): ...
  1329. def __enter__(self): ...
  1330. def __exit__(self, exc_type, exc_value, traceback): ...
  1331. def _set_autograd_fallback_mode(mode: str) -> None: ...
  1332. def _get_autograd_fallback_mode() -> str: ...
  1333. # Defined in torch/csrc/jit/python/script_init.cpp
  1334. class LoggerBase: ...
  1335. class NoopLogger(LoggerBase): ...
  1336. class LockingLogger(LoggerBase): ...
  1337. class AggregationType(Enum):
  1338. SUM = 0
  1339. AVG = 1
  1340. class FileCheck:
  1341. def run(self, test_string: str) -> None: ...
  1342. def check(self, test_string: str) -> FileCheck: ...
  1343. def check_not(self, test_string: str) -> FileCheck: ...
  1344. def check_same(self, test_string: str) -> FileCheck: ...
  1345. def check_next(self, test_string: str) -> FileCheck: ...
  1346. def check_count(
  1347. self,
  1348. test_string: str,
  1349. count: _int,
  1350. exactly: _bool = False,
  1351. ) -> FileCheck: ...
  1352. def check_dag(self, test_string: str) -> FileCheck: ...
  1353. def check_source_highlighted(self, test_string: str) -> FileCheck: ...
  1354. def check_regex(self, test_string: str) -> FileCheck: ...
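# Usage sketch (FileCheck is re-exported as torch.testing.FileCheck; illustrative only):
#
#   import torch
#   from torch.testing import FileCheck
#
#   @torch.jit.script
#   def f(x: torch.Tensor) -> torch.Tensor:
#       return torch.relu(x)
#
#   FileCheck().check("aten::relu").run(str(f.graph))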
  1355. # Defined in torch/csrc/jit/python/init.cpp
  1356. class PyTorchFileReader:
  1357. @overload
  1358. def __init__(self, name: str) -> None: ...
  1359. @overload
  1360. def __init__(self, buffer: BinaryIO) -> None: ...
  1361. def get_record(self, name: str) -> bytes: ...
  1362. def serialization_id(self) -> str: ...
  1363. class PyTorchFileWriter:
  1364. @overload
  1365. def __init__(self, name: str) -> None: ...
  1366. @overload
  1367. def __init__(self, buffer: BinaryIO) -> None: ...
  1368. def write_record(self, name: str, data: Union[Storage, bytes, _int], size: _int) -> None: ...
  1369. def write_end_of_file(self) -> None: ...
  1370. def set_min_version(self, version: _int) -> None: ...
  1371. def get_all_written_records(self) -> List[str]: ...
  1372. def archive_name(self) -> str: ...
  1373. def serialization_id(self) -> str: ...
  1374. def _jit_get_inline_everything_mode() -> _bool: ...
  1375. def _jit_set_inline_everything_mode(enabled: _bool) -> None: ...
  1376. def _jit_get_logging_option() -> str: ...
  1377. def _jit_set_logging_option(option: str) -> None: ...
  1378. def _jit_set_logging_stream(stream_name: str) -> None: ...
  1379. def _jit_pass_cse(Graph) -> _bool: ...
  1380. def _jit_pass_dce(Graph) -> None: ...
  1381. def _jit_pass_lint(Graph) -> None: ...
  1382. # Defined in torch/csrc/jit/python/python_custom_class.cpp
  1383. def _get_custom_class_python_wrapper(name: str, attr: str) -> Any: ...
  1384. # Defined in torch/csrc/Module.cpp
  1385. def _rename_privateuse1_backend(backend: str) -> None: ...
  1386. def _get_privateuse1_backend_name() -> str: ...
  1387. # Defined in torch/csrc/Generator.cpp
  1388. class Generator:
  1389. device: _device
  1390. def __init__(self, device: Optional[DeviceLikeType] = None) -> None: ...
  1391. def __reduce__(self) -> Tuple[Type[Generator], Tuple[_device], Tuple[_int, Optional[_int], Tensor]]: ...
  1392. def __setstate__(self, state: Tuple[_int, Optional[_int], Tensor]) -> None: ...
  1393. def get_state(self) -> Tensor: ...
  1394. def set_state(self, _new_state: Tensor) -> Generator: ...
  1395. def clone_state(self) -> Generator: ...
  1396. def graphsafe_get_state(self) -> Generator: ...
  1397. def graphsafe_set_state(self, _new_state: Generator) -> Generator: ...
  1398. def set_offset(self, offset: _int) -> Generator: ...
  1399. def get_offset(self) -> _int: ...
  1400. def manual_seed(self, seed: _int) -> Generator: ...
  1401. def seed(self) -> _int: ...
  1402. def initial_seed(self) -> _int: ...
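# Usage sketch for Generator (illustrative only):
#
#   import torch
#   g = torch.Generator(device="cpu")
#   g.manual_seed(1234)
#   x = torch.randn(4, generator=g)
#   state = g.get_state()     # Tensor snapshot of the RNG state
#   g.set_state(state)        # restore it later for reproducibility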
  1403. # Defined in torch/csrc/utils/python_dispatch.cpp
  1404. class _DispatchOperatorHandle:
  1405. def schema(self) -> FunctionSchema: ...
  1406. def debug(self) -> str: ...
  1407. class _DispatchModule:
  1408. def def_(self, schema: str, alias: str = "") -> _DispatchModule: ...
  1409. def def_legacy(self, schema: str) -> _DispatchModule: ...
  1410. def def_name_t_t(
  1411. self,
  1412. name: str,
  1413. dispatch: str,
  1414. debug: str = "default_def_name_t_t",
  1415. ) -> _DispatchModule: ...
  1416. def def_schema_t_t(
  1417. self,
  1418. schema: str,
  1419. dispatch: str,
  1420. alias: str,
  1421. debug: str = "default_def_schema_t_t",
  1422. ) -> _DispatchModule: ...
  1423. def impl_t_t(
  1424. self,
  1425. name: str,
  1426. dispatch: str,
  1427. debug: str = "impl_t_t",
  1428. ) -> _DispatchModule: ...
  1429. def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ...
  1430. def define(self, schema: str, alias: str = "") -> _DispatchModule: ...
  1431. def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ...
  1432. _after_ADInplaceOrView_keyset: DispatchKeySet
  1433. _after_autograd_keyset: DispatchKeySet
  1434. def _dispatch_library(
  1435. kind: str,
  1436. name: str,
  1437. dispatch: str,
  1438. file: str = "",
  1439. linenum: Any = 0,
  1440. ) -> _DispatchModule: ...
  1441. def _dispatch_dump(name: str) -> str: ...
  1442. def _dispatch_dump_table(name: str) -> str: ...
  1443. def _dispatch_check_invariants(name: str) -> None: ...
  1444. def _dispatch_check_all_invariants() -> None: ...
  1445. def _dispatch_call_boxed(handle: _DispatchOperatorHandle, *args, **kwargs) -> Any: ...
  1446. def _dispatch_find_schema_or_throw(name: str, overload_name: str) -> _DispatchOperatorHandle: ...
  1447. def _dispatch_set_report_error_callback(handle: _DispatchOperatorHandle, callback: Callable) -> None: ...
  1448. def _dispatch_has_kernel(name: str) -> _bool: ...
  1449. def _dispatch_has_kernel_for_dispatch_key(
  1450. name: str,
  1451. dispatch: _dispatchkey,
  1452. ) -> _bool: ...
  1453. def _dispatch_has_kernel_for_any_dispatch_key(
  1454. name: str,
  1455. dispatch_key_set: DispatchKeySet,
  1456. ) -> _bool: ...
  1457. def _dispatch_kernel_for_dispatch_key_is_fallthrough(
  1458. name: str,
  1459. dispatch: _dispatchkey,
  1460. ) -> _bool: ...
  1461. def _dispatch_has_computed_kernel_for_dispatch_key(
  1462. name: str,
  1463. dispatch: _dispatchkey,
  1464. ) -> _bool: ...
  1465. def _dispatch_find_dangling_impls() -> List[str]: ...
  1466. def _dispatch_get_all_op_names() -> List[str]: ...
  1467. def _dispatch_tls_set_dispatch_key_excluded(
  1468. dispatch: _dispatchkey,
  1469. val: _bool,
  1470. ) -> None: ...
  1471. def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ...
  1472. def _dispatch_tls_set_dispatch_key_included(
  1473. dispatch: _dispatchkey,
  1474. val: _bool,
  1475. ) -> None: ...
  1476. def _dispatch_tls_is_dispatch_key_included(dispatch: _dispatchkey) -> _bool: ...
  1477. def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ...
  1478. def _dispatch_key_name(dispatch: _dispatchkey) -> str: ...
  1479. def _dispatch_key_for_device(device_type: str) -> str: ...
  1480. def _parse_dispatch_key(key: str) -> Optional[DispatchKey]: ...
  1481. def _dispatch_key_parse(dispatch: _dispatchkey) -> DispatchKey: ...
  1482. def _dispatch_num_backends() -> _int: ...
  1483. def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ...
  1484. def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ...
  1485. def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ...
  1486. def _functionalization_reapply_views_tls() -> _bool: ...
  1487. def _only_lift_cpu_tensors() -> _bool: ...
  1488. def _set_only_lift_cpu_tensors(value: _bool) -> None: ...
  1489. def _set_throw_on_mutable_data_ptr(tensor: Tensor) -> None: ...
  1490. def _set_warn_deprecated_on_mutable_data_ptr(tensor: Tensor) -> None: ...
  1491. class DispatchKey(Enum):
  1492. Undefined: DispatchKey = ...
  1493. FPGA: DispatchKey = ...
  1494. MAIA: DispatchKey = ...
  1495. Vulkan: DispatchKey = ...
  1496. Metal: DispatchKey = ...
  1497. MKLDNN: DispatchKey = ...
  1498. OpenGL: DispatchKey = ...
  1499. OpenCL: DispatchKey = ...
  1500. IDEEP: DispatchKey = ...
  1501. CustomRNGKeyId: DispatchKey = ...
  1502. MkldnnCPU: DispatchKey = ...
  1503. Sparse: DispatchKey = ...
  1504. SparseCsr: DispatchKey = ...
  1505. NestedTensor: DispatchKey = ...
  1506. Dense: DispatchKey = ...
  1507. PythonTLSSnapshot: DispatchKey = ...
  1508. PreDispatch: DispatchKey = ...
  1509. PythonDispatcher: DispatchKey = ...
  1510. Python: DispatchKey = ...
  1511. FuncTorchDynamicLayerBackMode: DispatchKey = ...
  1512. ZeroTensor: DispatchKey = ...
  1513. Conjugate: DispatchKey = ...
  1514. Negative: DispatchKey = ...
  1515. BackendSelect: DispatchKey = ...
  1516. Named: DispatchKey = ...
  1517. AutogradOther: DispatchKey = ...
  1518. AutogradFunctionality: DispatchKey = ...
  1519. AutogradNestedTensor: DispatchKey = ...
  1520. Tracer: DispatchKey = ...
  1521. Autocast: DispatchKey = ...
  1522. AutocastCPU: DispatchKey = ...
  1523. AutocastCUDA: DispatchKey = ...
  1524. Batched: DispatchKey = ...
  1525. VmapMode: DispatchKey = ...
  1526. FuncTorchGradWrapper: DispatchKey = ...
  1527. FuncTorchBatched: DispatchKey = ...
  1528. BatchedNestedTensor: DispatchKey = ...
  1529. FuncTorchVmapMode: DispatchKey = ...
  1530. FuncTorchDynamicLayerFrontMode: DispatchKey = ...
  1531. Functionalize: DispatchKey = ...
  1532. TESTING_ONLY_GenericWrapper: DispatchKey = ...
  1533. TESTING_ONLY_GenericMode: DispatchKey = ...
  1534. ADInplaceOrView: DispatchKey = ...
  1535. Autograd: DispatchKey = ...
  1536. CompositeImplicitAutograd: DispatchKey = ...
  1537. CompositeImplicitAutogradNestedTensor: DispatchKey = ...
  1538. CompositeExplicitAutograd: DispatchKey = ...
  1539. CompositeExplicitAutogradNonFunctional: DispatchKey = ...
  1540. FuncTorchBatchedDecomposition: DispatchKey = ...
  1541. CPU: DispatchKey = ...
  1542. CUDA: DispatchKey = ...
  1543. HIP: DispatchKey = ...
  1544. XLA: DispatchKey = ...
  1545. MTIA: DispatchKey = ...
  1546. MPS: DispatchKey = ...
  1547. IPU: DispatchKey = ...
  1548. XPU: DispatchKey = ...
  1549. HPU: DispatchKey = ...
  1550. VE: DispatchKey = ...
  1551. Lazy: DispatchKey = ...
  1552. Meta: DispatchKey = ...
  1553. PrivateUse1: DispatchKey = ...
  1554. PrivateUse2: DispatchKey = ...
  1555. PrivateUse3: DispatchKey = ...
  1556. QuantizedCPU: DispatchKey = ...
  1557. QuantizedCUDA: DispatchKey = ...
  1558. QuantizedHIP: DispatchKey = ...
  1559. QuantizedXLA: DispatchKey = ...
  1560. QuantizedMTIA: DispatchKey = ...
  1561. QuantizedMPS: DispatchKey = ...
  1562. QuantizedIPU: DispatchKey = ...
  1563. QuantizedXPU: DispatchKey = ...
  1564. QuantizedHPU: DispatchKey = ...
  1565. QuantizedVE: DispatchKey = ...
  1566. QuantizedLazy: DispatchKey = ...
  1567. QuantizedMeta: DispatchKey = ...
  1568. QuantizedPrivateUse1: DispatchKey = ...
  1569. QuantizedPrivateUse2: DispatchKey = ...
  1570. QuantizedPrivateUse3: DispatchKey = ...
  1571. SparseCPU: DispatchKey = ...
  1572. SparseCUDA: DispatchKey = ...
  1573. SparseHIP: DispatchKey = ...
  1574. SparseXLA: DispatchKey = ...
  1575. SparseMTIA: DispatchKey = ...
  1576. SparseMPS: DispatchKey = ...
  1577. SparseIPU: DispatchKey = ...
  1578. SparseXPU: DispatchKey = ...
  1579. SparseHPU: DispatchKey = ...
  1580. SparseVE: DispatchKey = ...
  1581. SparseLazy: DispatchKey = ...
  1582. SparseMeta: DispatchKey = ...
  1583. SparsePrivateUse1: DispatchKey = ...
  1584. SparsePrivateUse2: DispatchKey = ...
  1585. SparsePrivateUse3: DispatchKey = ...
  1586. SparseCsrCPU: DispatchKey = ...
  1587. SparseCsrCUDA: DispatchKey = ...
  1588. SparseCsrHIP: DispatchKey = ...
  1589. SparseCsrXLA: DispatchKey = ...
  1590. SparseCsrMTIA: DispatchKey = ...
  1591. SparseCsrMPS: DispatchKey = ...
  1592. SparseCsrIPU: DispatchKey = ...
  1593. SparseCsrXPU: DispatchKey = ...
  1594. SparseCsrHPU: DispatchKey = ...
  1595. SparseCsrVE: DispatchKey = ...
  1596. SparseCsrLazy: DispatchKey = ...
  1597. SparseCsrMeta: DispatchKey = ...
  1598. SparseCsrPrivateUse1: DispatchKey = ...
  1599. SparseCsrPrivateUse2: DispatchKey = ...
  1600. SparseCsrPrivateUse3: DispatchKey = ...
  1601. NestedTensorCPU: DispatchKey = ...
  1602. NestedTensorCUDA: DispatchKey = ...
  1603. NestedTensorHIP: DispatchKey = ...
  1604. NestedTensorXLA: DispatchKey = ...
  1605. NestedTensorMTIA: DispatchKey = ...
  1606. NestedTensorMPS: DispatchKey = ...
  1607. NestedTensorIPU: DispatchKey = ...
  1608. NestedTensorXPU: DispatchKey = ...
  1609. NestedTensorHPU: DispatchKey = ...
  1610. NestedTensorVE: DispatchKey = ...
  1611. NestedTensorLazy: DispatchKey = ...
  1612. NestedTensorMeta: DispatchKey = ...
  1613. NestedTensorPrivateUse1: DispatchKey = ...
  1614. NestedTensorPrivateUse2: DispatchKey = ...
  1615. NestedTensorPrivateUse3: DispatchKey = ...
  1616. AutogradCPU: DispatchKey = ...
  1617. AutogradCUDA: DispatchKey = ...
  1618. AutogradHIP: DispatchKey = ...
  1619. AutogradXLA: DispatchKey = ...
  1620. AutogradMTIA: DispatchKey = ...
  1621. AutogradMPS: DispatchKey = ...
  1622. AutogradIPU: DispatchKey = ...
  1623. AutogradXPU: DispatchKey = ...
  1624. AutogradHPU: DispatchKey = ...
  1625. AutogradVE: DispatchKey = ...
  1626. AutogradLazy: DispatchKey = ...
  1627. AutogradMeta: DispatchKey = ...
  1628. AutogradPrivateUse1: DispatchKey = ...
  1629. AutogradPrivateUse2: DispatchKey = ...
  1630. AutogradPrivateUse3: DispatchKey = ...
  1631. class DispatchKeySet:
  1632. def __init__(self, key: DispatchKey) -> None: ...
  1633. def __or__(self, other: DispatchKeySet) -> DispatchKeySet: ...
  1634. def __sub__(self, other: DispatchKeySet) -> DispatchKeySet: ...
  1635. def __and__(self, other: DispatchKeySet) -> DispatchKeySet: ...
  1636. def highestPriorityTypeId(self) -> DispatchKey: ...
  1637. def has(self, k: _dispatchkey) -> _bool: ...
  1638. def add(self, k: _dispatchkey) -> DispatchKeySet: ...
  1639. def remove(self, k: _dispatchkey) -> DispatchKeySet: ...
  1640. def __repr__(self) -> str: ...
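# Usage sketch (illustrative): DispatchKeySet behaves like a small bit-set of keys and
# supports union (|), difference (-), intersection (&), and membership queries.
# >>> ks = torch._C.DispatchKeySet(torch._C.DispatchKey.CPU) | torch._C.DispatchKeySet(torch._C.DispatchKey.AutogradCPU)
# >>> ks.has(torch._C.DispatchKey.CPU)
# True
# >>> ks.highestPriorityTypeId()  # expected to be the AutogradCPU key; autograd outranks backend bits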
  1641. _dispatch_autogradother_backends: DispatchKeySet
  1642. _additional_keys_to_prop_for_wrapper_tensors: DispatchKeySet
  1643. def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ...
  1644. def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ...
  1645. def _dispatch_keyset_full() -> DispatchKeySet: ...
  1646. def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ...
  1647. def _dispatch_get_backend_keyset_from_autograd(
  1648. dispatch: _dispatchkey,
  1649. ) -> DispatchKeySet: ...
  1650. def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ...
  1651. def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ...
  1652. def _dispatch_tls_local_include_set() -> DispatchKeySet: ...
  1653. def _dispatch_is_included_in_alias(
  1654. dispatch_a: _dispatchkey,
  1655. dispatch_b: _dispatchkey,
  1656. ) -> _bool: ...
  1657. def _propagate_xla_data(a: Tensor, b: Tensor) -> None: ...
  1658. def _replace_(a: Tensor, b: Tensor) -> None: ...
  1659. def _commit_update(a: Tensor) -> None: ...
  1660. class _ExcludeDispatchKeyGuard:
  1661. def __init__(self, keyset: DispatchKeySet): ...
  1662. def __enter__(self): ...
  1663. def __exit__(self, exc_type, exc_value, traceback): ...
  1664. class _IncludeDispatchKeyGuard:
  1665. def __init__(self, k: DispatchKey): ...
  1666. def __enter__(self): ...
  1667. def __exit__(self, exc_type, exc_value, traceback): ...
  1668. class _ForceDispatchKeyGuard:
  1669. def __init__(self, include: DispatchKeySet, exclude: DispatchKeySet): ...
  1670. def __enter__(self): ...
  1671. def __exit__(self, exc_type, exc_value, traceback): ...
  1672. class _PreserveDispatchKeyGuard:
  1673. def __init__(self): ...
  1674. def __enter__(self): ...
  1675. def __exit__(self, exc_type, exc_value, traceback): ...
  1676. class _AutoDispatchBelowAutograd:
  1677. def __init__(self): ...
  1678. def __enter__(self): ...
  1679. def __exit__(self, exc_type, exc_value, traceback): ...
  1680. class _AutoDispatchBelowADInplaceOrView:
  1681. def __init__(self): ...
  1682. def __enter__(self): ...
  1683. def __exit__(self, exc_type, exc_value, traceback): ...
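# Usage sketch (illustrative): the guard classes above are context managers that
# temporarily adjust dispatch-key TLS state and restore it on exit.
# >>> excl = torch._C.DispatchKeySet(torch._C.DispatchKey.ADInplaceOrView)
# >>> with torch._C._ExcludeDispatchKeyGuard(excl):
# ...     pass  # ops dispatched inside this block skip the excluded key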
  1684. def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ...
  1685. def _dispatch_get_registrations_for_dispatch_key(
  1686. dispatch_key: str = "",
  1687. ) -> List[str]: ...
  1688. def _are_functorch_transforms_active() -> _bool: ...
1689. # Defined in torch/csrc/autograd/init.cpp
  1690. def _set_python_dispatcher(dispatcher: object) -> None: ...
  1691. def _get_nested_int(id: _int, coeff: _int) -> SymInt: ...
  1692. def _get_constant_bool_symnode(val: _bool) -> Any: ...
  1693. class _TorchDispatchModeKey(Enum):
  1694. FAKE: _TorchDispatchModeKey = ...
  1695. PROXY: _TorchDispatchModeKey = ...
  1696. FUNCTIONAL: _TorchDispatchModeKey = ...
  1697. class _SetExcludeDispatchKeyGuard:
  1698. def __init__(self, k: DispatchKey, enabled: _bool): ...
  1699. def __enter__(self): ...
  1700. def __exit__(self, exc_type, exc_value, traceback): ...
  1701. # Defined in torch/csrc/utils/init.cpp
  1702. class BenchmarkConfig:
  1703. num_calling_threads: _int
  1704. num_worker_threads: _int
  1705. num_warmup_iters: _int
  1706. num_iters: _int
  1707. profiler_output_path: str
  1708. class BenchmarkExecutionStats:
  1709. latency_avg_ms: _float
  1710. num_iters: _int
  1711. class ThroughputBenchmark:
  1712. def __init__(self, module: Any) -> None: ...
  1713. def add_input(self, *args: Any, **kwargs: Any) -> None: ...
  1714. def run_once(self, *args: Any, **kwargs: Any) -> Any: ...
  1715. def benchmark(self, config: BenchmarkConfig) -> BenchmarkExecutionStats: ...
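# Usage sketch (illustrative; ``my_module`` is a stand-in for any callable nn.Module):
# >>> bench = torch._C.ThroughputBenchmark(my_module)
# >>> bench.add_input(torch.randn(1, 16))
# >>> cfg = torch._C.BenchmarkConfig()
# >>> cfg.num_calling_threads = 2; cfg.num_worker_threads = 2
# >>> cfg.num_warmup_iters = 10; cfg.num_iters = 100
# >>> stats = bench.benchmark(cfg)
# >>> stats.latency_avg_ms, stats.num_iters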
  1716. # Defined in torch/csrc/Storage.cpp
  1717. class StorageBase(object): ...
  1718. # TODO: where
  1719. class DoubleTensor(Tensor): ...
  1720. class FloatTensor(Tensor): ...
  1721. class BFloat16Tensor(Tensor): ...
  1722. class LongTensor(Tensor): ...
  1723. class IntTensor(Tensor): ...
  1724. class ShortTensor(Tensor): ...
  1725. class HalfTensor(Tensor): ...
  1726. class CharTensor(Tensor): ...
  1727. class ByteTensor(Tensor): ...
  1728. class BoolTensor(Tensor): ...
  1729. # Defined in torch/csrc/autograd/python_engine.cpp
  1730. class _ImperativeEngine:
  1731. def queue_callback(self, callback: Callable[[], None]) -> None: ...
  1732. def run_backward(self, *args: Any, **kwargs: Any) -> Tuple[Tensor, ...]: ...
  1733. def is_checkpoint_valid(self) -> _bool: ...
  1734. # Defined in torch/csrc/autograd/python_variable.cpp
  1735. class _TensorMeta(type): ...
  1736. # Defined in torch/csrc/autograd/python_variable.cpp
  1737. class TensorBase(metaclass=_TensorMeta):
  1738. requires_grad: _bool
  1739. retains_grad: _bool
  1740. shape: Size
  1741. data: Tensor
  1742. names: List[str]
  1743. device: _device
  1744. dtype: _dtype
  1745. layout: _layout
  1746. real: Tensor
  1747. imag: Tensor
  1748. T: Tensor
  1749. H: Tensor
  1750. mT: Tensor
  1751. mH: Tensor
  1752. ndim: _int
  1753. output_nr: _int
  1754. _version: _int
  1755. _base: Optional[Tensor]
  1756. _cdata: _int
  1757. grad_fn: Optional[_Node]
  1758. _grad_fn: Any
  1759. _grad: Optional[Tensor]
  1760. grad: Optional[Tensor]
  1761. _backward_hooks: Optional[Dict[_int, Callable[[Tensor], Optional[Tensor]]]]
  1762. nbytes: _int
  1763. itemsize: _int
  1764. _has_symbolic_sizes_strides: _bool
  1765. def _view_func_unsafe(
  1766. self,
  1767. new_base: Tensor,
  1768. symint_visitor_fn: Optional[Callable[[_int], _int]] = None,
  1769. tensor_visitor_fn: Optional[Callable[[Tensor], Tensor]] = None
  1770. ):
  1771. ...
  1772. def __abs__(self) -> Tensor: ...
  1773. def __add__(self, other: Any) -> Tensor: ...
  1774. @overload
  1775. def __and__(self, other: Tensor) -> Tensor: ...
  1776. @overload
  1777. def __and__(self, other: Union[Number, _complex]) -> Tensor: ...
  1778. @overload
  1779. def __and__(self, other: Any) -> Tensor: ...
  1780. def __bool__(self) -> builtins.bool: ...
  1781. def __complex__(self) -> builtins.complex: ...
  1782. def __div__(self, other: Any) -> Tensor: ...
  1783. def __eq__(self, other: Any) -> Tensor: ... # type: ignore[override]
  1784. def __float__(self) -> builtins.float: ...
  1785. def __floordiv__(self, other: Any) -> Tensor: ...
  1786. def __ge__(self, other: Any) -> Tensor: ...
  1787. def __getitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]]) -> Tensor: ...
  1788. def __gt__(self, other: Any) -> Tensor: ...
  1789. def __iadd__(self, other: Any) -> Tensor: ...
  1790. @overload
  1791. def __iand__(self, other: Tensor) -> Tensor: ...
  1792. @overload
  1793. def __iand__(self, other: Union[Number, _complex]) -> Tensor: ...
  1794. @overload
  1795. def __iand__(self, other: Any) -> Tensor: ...
  1796. def __idiv__(self, other: Any) -> Tensor: ...
  1797. def __ifloordiv__(self, other: Any) -> Tensor: ...
  1798. @overload
  1799. def __ilshift__(self, other: Tensor) -> Tensor: ...
  1800. @overload
  1801. def __ilshift__(self, other: Union[Number, _complex]) -> Tensor: ...
  1802. @overload
  1803. def __ilshift__(self, other: Any) -> Tensor: ...
  1804. def __imod__(self, other: Any) -> Tensor: ...
  1805. def __imul__(self, other: Any) -> Tensor: ...
  1806. def __index__(self) -> builtins.int: ...
  1807. @overload
  1808. def __init__(self, *args: Any, device: Optional[DeviceLikeType] = None) -> None: ...
  1809. @overload
  1810. def __init__(self, storage: Storage) -> None: ...
  1811. @overload
  1812. def __init__(self, other: Tensor) -> None: ...
  1813. @overload
  1814. def __init__(self, size: _size, *, device: Optional[DeviceLikeType] = None) -> None: ...
  1815. def __int__(self) -> builtins.int: ...
  1816. def __invert__(self) -> Tensor: ...
  1817. @overload
  1818. def __ior__(self, other: Tensor) -> Tensor: ...
  1819. @overload
  1820. def __ior__(self, other: Union[Number, _complex]) -> Tensor: ...
  1821. @overload
  1822. def __ior__(self, other: Any) -> Tensor: ...
  1823. @overload
  1824. def __irshift__(self, other: Tensor) -> Tensor: ...
  1825. @overload
  1826. def __irshift__(self, other: Union[Number, _complex]) -> Tensor: ...
  1827. @overload
  1828. def __irshift__(self, other: Any) -> Tensor: ...
  1829. def __isub__(self, other: Any) -> Tensor: ...
  1830. @overload
  1831. def __ixor__(self, other: Tensor) -> Tensor: ...
  1832. @overload
  1833. def __ixor__(self, other: Union[Number, _complex]) -> Tensor: ...
  1834. @overload
  1835. def __ixor__(self, other: Any) -> Tensor: ...
  1836. def __le__(self, other: Any) -> Tensor: ...
  1837. def __long__(self) -> builtins.int: ...
  1838. @overload
  1839. def __lshift__(self, other: Tensor) -> Tensor: ...
  1840. @overload
  1841. def __lshift__(self, other: Union[Number, _complex]) -> Tensor: ...
  1842. @overload
  1843. def __lshift__(self, other: Any) -> Tensor: ...
  1844. def __lt__(self, other: Any) -> Tensor: ...
  1845. def __matmul__(self, other: Any) -> Tensor: ...
  1846. def __mod__(self, other: Any) -> Tensor: ...
  1847. def __mul__(self, other: Any) -> Tensor: ...
  1848. def __ne__(self, other: Any) -> Tensor: ... # type: ignore[override]
  1849. def __neg__(self) -> Tensor: ...
  1850. def __new__(cls, *args, **kwargs) -> Self: ...
  1851. def __nonzero__(self) -> builtins.bool: ...
  1852. @overload
  1853. def __or__(self, other: Tensor) -> Tensor: ...
  1854. @overload
  1855. def __or__(self, other: Union[Number, _complex]) -> Tensor: ...
  1856. @overload
  1857. def __or__(self, other: Any) -> Tensor: ...
  1858. def __pow__(self, other: Any) -> Tensor: ...
  1859. def __radd__(self, other: Any) -> Tensor: ...
  1860. def __rand__(self, other: Any) -> Tensor: ...
  1861. def __rfloordiv__(self, other: Any) -> Tensor: ...
  1862. def __rmul__(self, other: Any) -> Tensor: ...
  1863. def __ror__(self, other: Any) -> Tensor: ...
  1864. def __rpow__(self, other: Any) -> Tensor: ...
  1865. @overload
  1866. def __rshift__(self, other: Tensor) -> Tensor: ...
  1867. @overload
  1868. def __rshift__(self, other: Union[Number, _complex]) -> Tensor: ...
  1869. @overload
  1870. def __rshift__(self, other: Any) -> Tensor: ...
  1871. def __rsub__(self, other: Any) -> Tensor: ...
  1872. def __rtruediv__(self, other: Any) -> Tensor: ...
  1873. def __rxor__(self, other: Any) -> Tensor: ...
  1874. def __setitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]], val: Union[Tensor, Number]) -> None: ...
  1875. def __sub__(self, other: Any) -> Tensor: ...
  1876. def __truediv__(self, other: Any) -> Tensor: ...
  1877. @overload
  1878. def __xor__(self, other: Tensor) -> Tensor: ...
  1879. @overload
  1880. def __xor__(self, other: Union[Number, _complex]) -> Tensor: ...
  1881. @overload
  1882. def __xor__(self, other: Any) -> Tensor: ...
  1883. def _addmm_activation(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False) -> Tensor: ...
  1884. def _autocast_to_full_precision(self, cuda_enabled: _bool, cpu_enabled: _bool) -> Tensor: ...
  1885. def _autocast_to_reduced_precision(self, cuda_enabled: _bool, cpu_enabled: _bool, cuda_dtype: _dtype, cpu_dtype: _dtype) -> Tensor: ...
  1886. def _coalesced_(self, coalesced: _bool) -> Tensor: ...
  1887. def _conj(self) -> Tensor: ...
  1888. def _conj_physical(self) -> Tensor: ...
  1889. def _dimI(self) -> _int: ...
  1890. def _dimV(self) -> _int: ...
  1891. def _indices(self) -> Tensor: ...
  1892. def _is_all_true(self) -> Tensor: ...
  1893. def _is_any_true(self) -> Tensor: ...
  1894. def _is_view(self) -> _bool: ...
  1895. def _is_zerotensor(self) -> _bool: ...
  1896. def _lazy_clone(self) -> Tensor: ...
  1897. @staticmethod
  1898. def _make_subclass(cls: Type[S], data: Tensor, require_grad: _bool = False, dispatch_strides: _bool = False, dispatch_device: _bool = False, device_for_backend_keys: Optional[_device] = None) -> S: ...
  1899. def _neg_view(self) -> Tensor: ...
  1900. def _nested_tensor_size(self) -> Tensor: ...
  1901. def _nested_tensor_storage_offsets(self) -> Tensor: ...
  1902. def _nested_tensor_strides(self) -> Tensor: ...
  1903. def _nnz(self) -> _int: ...
  1904. def _sparse_mask_projection(self, mask: Tensor, accumulate_matches: _bool = False) -> Tensor: ...
  1905. def _to_dense(self, dtype: Optional[_dtype] = None, masked_grad: Optional[_bool] = None) -> Tensor: ...
  1906. @overload
  1907. def _to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor: ...
  1908. @overload
  1909. def _to_sparse(self, sparse_dim: _int) -> Tensor: ...
  1910. def _to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ...
  1911. def _to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ...
  1912. def _to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor: ...
  1913. def _to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor: ...
  1914. def _values(self) -> Tensor: ...
  1915. def abs(self) -> Tensor:
  1916. r"""
  1917. abs() -> Tensor
  1918. See :func:`torch.abs`
  1919. """
  1920. ...
  1921. def abs_(self) -> Tensor:
  1922. r"""
  1923. abs_() -> Tensor
  1924. In-place version of :meth:`~Tensor.abs`
  1925. """
  1926. ...
  1927. def absolute(self) -> Tensor:
  1928. r"""
  1929. absolute() -> Tensor
  1930. Alias for :func:`abs`
  1931. """
  1932. ...
  1933. def absolute_(self) -> Tensor:
  1934. r"""
  1935. absolute_() -> Tensor
  1936. In-place version of :meth:`~Tensor.absolute`
  1937. Alias for :func:`abs_`
  1938. """
  1939. ...
  1940. def acos(self) -> Tensor:
  1941. r"""
  1942. acos() -> Tensor
  1943. See :func:`torch.acos`
  1944. """
  1945. ...
  1946. def acos_(self) -> Tensor:
  1947. r"""
  1948. acos_() -> Tensor
  1949. In-place version of :meth:`~Tensor.acos`
  1950. """
  1951. ...
  1952. def acosh(self) -> Tensor:
  1953. r"""
  1954. acosh() -> Tensor
  1955. See :func:`torch.acosh`
  1956. """
  1957. ...
  1958. def acosh_(self) -> Tensor:
  1959. r"""
  1960. acosh_() -> Tensor
  1961. In-place version of :meth:`~Tensor.acosh`
  1962. """
  1963. ...
  1964. def add(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
  1965. r"""
  1966. add(other, *, alpha=1) -> Tensor
  1967. Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
  1968. and :attr:`other` are specified, each element of :attr:`other` is scaled by
  1969. :attr:`alpha` before being used.
  1970. When :attr:`other` is a tensor, the shape of :attr:`other` must be
  1971. :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
1972. tensor.
  1973. See :func:`torch.add`
  1974. """
  1975. ...
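# Example sketch (illustrative): ``alpha`` scales ``other`` before the addition.
# >>> torch.tensor([1.0, 2.0]).add(torch.tensor([10.0, 20.0]), alpha=0.5)
# tensor([ 6., 12.])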
  1976. def add_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1) -> Tensor:
  1977. r"""
  1978. add_(other, *, alpha=1) -> Tensor
  1979. In-place version of :meth:`~Tensor.add`
  1980. """
  1981. ...
  1982. def addbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  1983. r"""
  1984. addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
  1985. See :func:`torch.addbmm`
  1986. """
  1987. ...
  1988. def addbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  1989. r"""
  1990. addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
  1991. In-place version of :meth:`~Tensor.addbmm`
  1992. """
  1993. ...
  1994. def addcdiv(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
  1995. r"""
  1996. addcdiv(tensor1, tensor2, *, value=1) -> Tensor
  1997. See :func:`torch.addcdiv`
  1998. """
  1999. ...
  2000. def addcdiv_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
  2001. r"""
  2002. addcdiv_(tensor1, tensor2, *, value=1) -> Tensor
  2003. In-place version of :meth:`~Tensor.addcdiv`
  2004. """
  2005. ...
  2006. def addcmul(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
  2007. r"""
  2008. addcmul(tensor1, tensor2, *, value=1) -> Tensor
  2009. See :func:`torch.addcmul`
  2010. """
  2011. ...
  2012. def addcmul_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
  2013. r"""
  2014. addcmul_(tensor1, tensor2, *, value=1) -> Tensor
  2015. In-place version of :meth:`~Tensor.addcmul`
  2016. """
  2017. ...
  2018. def addmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2019. r"""
  2020. addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
  2021. See :func:`torch.addmm`
  2022. """
  2023. ...
  2024. def addmm_(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2025. r"""
  2026. addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor
  2027. In-place version of :meth:`~Tensor.addmm`
  2028. """
  2029. ...
  2030. def addmv(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2031. r"""
  2032. addmv(mat, vec, *, beta=1, alpha=1) -> Tensor
  2033. See :func:`torch.addmv`
  2034. """
  2035. ...
  2036. def addmv_(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2037. r"""
  2038. addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor
  2039. In-place version of :meth:`~Tensor.addmv`
  2040. """
  2041. ...
  2042. def addr(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2043. r"""
  2044. addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor
  2045. See :func:`torch.addr`
  2046. """
  2047. ...
  2048. def addr_(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2049. r"""
  2050. addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor
  2051. In-place version of :meth:`~Tensor.addr`
  2052. """
  2053. ...
  2054. def adjoint(self) -> Tensor:
  2055. r"""
  2056. adjoint() -> Tensor
  2057. Alias for :func:`adjoint`
  2058. """
  2059. ...
  2060. def align_as(self, other: Tensor) -> Tensor:
  2061. r"""
  2062. align_as(other) -> Tensor
  2063. Permutes the dimensions of the :attr:`self` tensor to match the dimension order
  2064. in the :attr:`other` tensor, adding size-one dims for any new names.
  2065. This operation is useful for explicit broadcasting by names (see examples).
  2066. All of the dims of :attr:`self` must be named in order to use this method.
  2067. The resulting tensor is a view on the original tensor.
  2068. All dimension names of :attr:`self` must be present in ``other.names``.
  2069. :attr:`other` may contain named dimensions that are not in ``self.names``;
  2070. the output tensor has a size-one dimension for each of those new names.
  2071. To align a tensor to a specific order, use :meth:`~Tensor.align_to`.
  2072. Examples::
  2073. # Example 1: Applying a mask
  2074. >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
  2075. >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
  2076. >>> imgs.masked_fill_(mask.align_as(imgs), 0)
  2077. # Example 2: Applying a per-channel-scale
  2078. >>> def scale_channels(input, scale):
2079. ...     scale = scale.refine_names('C')
2080. ...     return input * scale.align_as(input)
  2081. >>> num_channels = 3
  2082. >>> scale = torch.randn(num_channels, names=('C',))
  2083. >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
  2084. >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
  2085. >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))
  2086. # scale_channels is agnostic to the dimension order of the input
  2087. >>> scale_channels(imgs, scale)
  2088. >>> scale_channels(more_imgs, scale)
  2089. >>> scale_channels(videos, scale)
  2090. .. warning::
  2091. The named tensor API is experimental and subject to change.
  2092. """
  2093. ...
  2094. @overload
  2095. def align_to(self, order: Sequence[Union[str, ellipsis, None]], ellipsis_idx: _int) -> Tensor: ...
  2096. @overload
  2097. def align_to(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
  2098. @overload
  2099. def all(self) -> Tensor:
  2100. r"""
  2101. all(dim=None, keepdim=False) -> Tensor
  2102. See :func:`torch.all`
  2103. """
  2104. ...
  2105. @overload
  2106. def all(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor:
  2107. r"""
  2108. all(dim=None, keepdim=False) -> Tensor
  2109. See :func:`torch.all`
  2110. """
  2111. ...
  2112. @overload
  2113. def all(self, dim: _int, keepdim: _bool = False) -> Tensor:
  2114. r"""
  2115. all(dim=None, keepdim=False) -> Tensor
  2116. See :func:`torch.all`
  2117. """
  2118. ...
  2119. @overload
  2120. def all(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor:
  2121. r"""
  2122. all(dim=None, keepdim=False) -> Tensor
  2123. See :func:`torch.all`
  2124. """
  2125. ...
  2126. def allclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool:
  2127. r"""
2128. allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
  2129. See :func:`torch.allclose`
  2130. """
  2131. ...
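# Example sketch (illustrative): elements compare equal when they differ by at most
# ``atol + rtol * |other|``.
# >>> torch.tensor([1.0, 1e-09]).allclose(torch.tensor([1.0, 0.0]))
# True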
  2132. def amax(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor:
  2133. r"""
  2134. amax(dim=None, keepdim=False) -> Tensor
  2135. See :func:`torch.amax`
  2136. """
  2137. ...
  2138. def amin(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor:
  2139. r"""
  2140. amin(dim=None, keepdim=False) -> Tensor
  2141. See :func:`torch.amin`
  2142. """
  2143. ...
  2144. def aminmax(self, *, dim: Optional[_int] = None, keepdim: _bool = False) -> torch.return_types.aminmax:
  2145. r"""
  2146. aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
  2147. See :func:`torch.aminmax`
  2148. """
  2149. ...
  2150. def angle(self) -> Tensor:
  2151. r"""
  2152. angle() -> Tensor
  2153. See :func:`torch.angle`
  2154. """
  2155. ...
  2156. @overload
  2157. def any(self) -> Tensor:
  2158. r"""
  2159. any(dim=None, keepdim=False) -> Tensor
  2160. See :func:`torch.any`
  2161. """
  2162. ...
  2163. @overload
  2164. def any(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor:
  2165. r"""
  2166. any(dim=None, keepdim=False) -> Tensor
  2167. See :func:`torch.any`
  2168. """
  2169. ...
  2170. @overload
  2171. def any(self, dim: _int, keepdim: _bool = False) -> Tensor:
  2172. r"""
  2173. any(dim=None, keepdim=False) -> Tensor
  2174. See :func:`torch.any`
  2175. """
  2176. ...
  2177. @overload
  2178. def any(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor:
  2179. r"""
  2180. any(dim=None, keepdim=False) -> Tensor
  2181. See :func:`torch.any`
  2182. """
  2183. ...
  2184. def apply_(self, callable: Callable) -> Tensor:
  2185. r"""
  2186. apply_(callable) -> Tensor
  2187. Applies the function :attr:`callable` to each element in the tensor, replacing
  2188. each element with the value returned by :attr:`callable`.
  2189. .. note::
  2190. This function only works with CPU tensors and should not be used in code
  2191. sections that require high performance.
  2192. """
  2193. ...
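# Example sketch (CPU tensors only; illustrative):
# >>> t = torch.tensor([1.0, 2.0, 3.0])
# >>> t.apply_(lambda x: x * 2)
# tensor([2., 4., 6.])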
  2194. def arccos(self) -> Tensor:
  2195. r"""
  2196. arccos() -> Tensor
  2197. See :func:`torch.arccos`
  2198. """
  2199. ...
  2200. def arccos_(self) -> Tensor:
  2201. r"""
  2202. arccos_() -> Tensor
  2203. In-place version of :meth:`~Tensor.arccos`
  2204. """
  2205. ...
  2206. def arccosh(self) -> Tensor:
  2207. r"""
2208. arccosh() -> Tensor
  2209. See :func:`torch.arccosh`
  2210. """
  2211. ...
  2212. def arccosh_(self) -> Tensor:
  2213. r"""
2214. arccosh_() -> Tensor
  2215. In-place version of :meth:`~Tensor.arccosh`
  2216. """
  2217. ...
  2218. def arcsin(self) -> Tensor:
  2219. r"""
  2220. arcsin() -> Tensor
  2221. See :func:`torch.arcsin`
  2222. """
  2223. ...
  2224. def arcsin_(self) -> Tensor:
  2225. r"""
  2226. arcsin_() -> Tensor
  2227. In-place version of :meth:`~Tensor.arcsin`
  2228. """
  2229. ...
  2230. def arcsinh(self) -> Tensor:
  2231. r"""
  2232. arcsinh() -> Tensor
  2233. See :func:`torch.arcsinh`
  2234. """
  2235. ...
  2236. def arcsinh_(self) -> Tensor:
  2237. r"""
  2238. arcsinh_() -> Tensor
  2239. In-place version of :meth:`~Tensor.arcsinh`
  2240. """
  2241. ...
  2242. def arctan(self) -> Tensor:
  2243. r"""
  2244. arctan() -> Tensor
  2245. See :func:`torch.arctan`
  2246. """
  2247. ...
  2248. def arctan2(self, other: Tensor) -> Tensor:
  2249. r"""
  2250. arctan2(other) -> Tensor
  2251. See :func:`torch.arctan2`
  2252. """
  2253. ...
  2254. def arctan2_(self, other: Tensor) -> Tensor:
  2255. r"""
2256. arctan2_(other) -> Tensor
  2257. In-place version of :meth:`~Tensor.arctan2`
  2258. """
  2259. ...
  2260. def arctan_(self) -> Tensor:
  2261. r"""
  2262. arctan_() -> Tensor
  2263. In-place version of :meth:`~Tensor.arctan`
  2264. """
  2265. ...
  2266. def arctanh(self) -> Tensor:
  2267. r"""
  2268. arctanh() -> Tensor
  2269. See :func:`torch.arctanh`
  2270. """
  2271. ...
  2272. def arctanh_(self) -> Tensor:
  2273. r"""
2274. arctanh_() -> Tensor
  2275. In-place version of :meth:`~Tensor.arctanh`
  2276. """
  2277. ...
  2278. def argmax(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor:
  2279. r"""
  2280. argmax(dim=None, keepdim=False) -> LongTensor
  2281. See :func:`torch.argmax`
  2282. """
  2283. ...
  2284. def argmin(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor:
  2285. r"""
  2286. argmin(dim=None, keepdim=False) -> LongTensor
  2287. See :func:`torch.argmin`
  2288. """
  2289. ...
  2290. @overload
  2291. def argsort(self, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor:
  2292. r"""
  2293. argsort(dim=-1, descending=False) -> LongTensor
  2294. See :func:`torch.argsort`
  2295. """
  2296. ...
  2297. @overload
  2298. def argsort(self, dim: _int = -1, descending: _bool = False) -> Tensor:
  2299. r"""
  2300. argsort(dim=-1, descending=False) -> LongTensor
  2301. See :func:`torch.argsort`
  2302. """
  2303. ...
  2304. @overload
  2305. def argsort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor:
  2306. r"""
  2307. argsort(dim=-1, descending=False) -> LongTensor
  2308. See :func:`torch.argsort`
  2309. """
  2310. ...
  2311. def argwhere(self) -> Tensor:
  2312. r"""
  2313. argwhere() -> Tensor
  2314. See :func:`torch.argwhere`
  2315. """
  2316. ...
  2317. def as_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
  2318. r"""
  2319. as_strided(size, stride, storage_offset=None) -> Tensor
  2320. See :func:`torch.as_strided`
  2321. """
  2322. ...
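# Example sketch (illustrative): reinterpret a 1-D tensor as overlapping 2x2 windows
# by choosing size and stride explicitly.
# >>> x = torch.arange(4.)
# >>> x.as_strided((2, 2), (1, 1))
# tensor([[0., 1.],
#         [1., 2.]])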
  2323. def as_strided_(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
  2324. r"""
  2325. as_strided_(size, stride, storage_offset=None) -> Tensor
  2326. In-place version of :meth:`~Tensor.as_strided`
  2327. """
  2328. ...
  2329. def as_strided_scatter(self, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
  2330. r"""
  2331. as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor
  2332. See :func:`torch.as_strided_scatter`
  2333. """
  2334. ...
  2335. def as_subclass(self, cls: Type[S]) -> S:
  2336. r"""
  2337. as_subclass(cls) -> Tensor
  2338. Makes a ``cls`` instance with the same data pointer as ``self``. Changes
  2339. in the output mirror changes in ``self``, and the output stays attached
  2340. to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
  2341. """
  2342. ...
  2343. def asin(self) -> Tensor:
  2344. r"""
  2345. asin() -> Tensor
  2346. See :func:`torch.asin`
  2347. """
  2348. ...
  2349. def asin_(self) -> Tensor:
  2350. r"""
  2351. asin_() -> Tensor
  2352. In-place version of :meth:`~Tensor.asin`
  2353. """
  2354. ...
  2355. def asinh(self) -> Tensor:
  2356. r"""
  2357. asinh() -> Tensor
  2358. See :func:`torch.asinh`
  2359. """
  2360. ...
  2361. def asinh_(self) -> Tensor:
  2362. r"""
  2363. asinh_() -> Tensor
  2364. In-place version of :meth:`~Tensor.asinh`
  2365. """
  2366. ...
  2367. def atan(self) -> Tensor:
  2368. r"""
  2369. atan() -> Tensor
  2370. See :func:`torch.atan`
  2371. """
  2372. ...
  2373. def atan2(self, other: Tensor) -> Tensor:
  2374. r"""
  2375. atan2(other) -> Tensor
  2376. See :func:`torch.atan2`
  2377. """
  2378. ...
  2379. def atan2_(self, other: Tensor) -> Tensor:
  2380. r"""
  2381. atan2_(other) -> Tensor
  2382. In-place version of :meth:`~Tensor.atan2`
  2383. """
  2384. ...
  2385. def atan_(self) -> Tensor:
  2386. r"""
  2387. atan_() -> Tensor
  2388. In-place version of :meth:`~Tensor.atan`
  2389. """
  2390. ...
  2391. def atanh(self) -> Tensor:
  2392. r"""
  2393. atanh() -> Tensor
  2394. See :func:`torch.atanh`
  2395. """
  2396. ...
  2397. def atanh_(self) -> Tensor:
  2398. r"""
2399. atanh_() -> Tensor
  2400. In-place version of :meth:`~Tensor.atanh`
  2401. """
  2402. ...
  2403. def baddbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2404. r"""
  2405. baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
  2406. See :func:`torch.baddbmm`
  2407. """
  2408. ...
  2409. def baddbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  2410. r"""
  2411. baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
  2412. In-place version of :meth:`~Tensor.baddbmm`
  2413. """
  2414. ...
  2415. @overload
  2416. def bernoulli(self, *, generator: Optional[Generator] = None) -> Tensor:
  2417. r"""
  2418. bernoulli(*, generator=None) -> Tensor
  2419. Returns a result tensor where each :math:`\texttt{result[i]}` is independently
  2420. sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
  2421. floating point ``dtype``, and the result will have the same ``dtype``.
  2422. See :func:`torch.bernoulli`
  2423. """
  2424. ...
  2425. @overload
  2426. def bernoulli(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
  2427. r"""
  2428. bernoulli(*, generator=None) -> Tensor
  2429. Returns a result tensor where each :math:`\texttt{result[i]}` is independently
  2430. sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
  2431. floating point ``dtype``, and the result will have the same ``dtype``.
  2432. See :func:`torch.bernoulli`
  2433. """
  2434. ...
  2435. @overload
  2436. def bernoulli_(self, p: Tensor, *, generator: Optional[Generator] = None) -> Tensor:
  2437. r"""
  2438. bernoulli_(p=0.5, *, generator=None) -> Tensor
  2439. Fills each location of :attr:`self` with an independent sample from
  2440. :math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
  2441. ``dtype``.
  2442. :attr:`p` should either be a scalar or tensor containing probabilities to be
  2443. used for drawing the binary random number.
  2444. If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
  2445. will be set to a value sampled from
  2446. :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
  2447. floating point ``dtype``.
  2448. See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
  2449. """
  2450. ...
  2451. @overload
  2452. def bernoulli_(self, p: _float = 0.5, *, generator: Optional[Generator] = None) -> Tensor:
  2453. r"""
  2454. bernoulli_(p=0.5, *, generator=None) -> Tensor
  2455. Fills each location of :attr:`self` with an independent sample from
  2456. :math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
  2457. ``dtype``.
  2458. :attr:`p` should either be a scalar or tensor containing probabilities to be
  2459. used for drawing the binary random number.
  2460. If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
  2461. will be set to a value sampled from
  2462. :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
  2463. floating point ``dtype``.
  2464. See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
  2465. """
  2466. ...
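# Example sketch (illustrative): fill in place with Bernoulli samples drawn from a
# per-element probability tensor.
# >>> probs = torch.tensor([0.0, 0.5, 1.0])
# >>> torch.empty(3).bernoulli_(probs)  # first entry is always 0., last always 1.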
  2467. def bfloat16(self) -> Tensor:
  2468. r"""
  2469. bfloat16(memory_format=torch.preserve_format) -> Tensor
  2470. ``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
  2471. Args:
  2472. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2473. returned Tensor. Default: ``torch.preserve_format``.
  2474. """
  2475. ...
  2476. def bincount(self, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor:
  2477. r"""
  2478. bincount(weights=None, minlength=0) -> Tensor
  2479. See :func:`torch.bincount`
  2480. """
  2481. ...
  2482. @overload
  2483. def bitwise_and(self, other: Tensor) -> Tensor:
  2484. r"""
  2485. bitwise_and() -> Tensor
  2486. See :func:`torch.bitwise_and`
  2487. """
  2488. ...
  2489. @overload
  2490. def bitwise_and(self, other: Union[Number, _complex]) -> Tensor:
  2491. r"""
  2492. bitwise_and() -> Tensor
  2493. See :func:`torch.bitwise_and`
  2494. """
  2495. ...
  2496. @overload
  2497. def bitwise_and_(self, other: Tensor) -> Tensor:
  2498. r"""
  2499. bitwise_and_() -> Tensor
  2500. In-place version of :meth:`~Tensor.bitwise_and`
  2501. """
  2502. ...
  2503. @overload
  2504. def bitwise_and_(self, other: Union[Number, _complex]) -> Tensor:
  2505. r"""
  2506. bitwise_and_() -> Tensor
  2507. In-place version of :meth:`~Tensor.bitwise_and`
  2508. """
  2509. ...
  2510. @overload
  2511. def bitwise_left_shift(self, other: Tensor) -> Tensor:
  2512. r"""
  2513. bitwise_left_shift(other) -> Tensor
  2514. See :func:`torch.bitwise_left_shift`
  2515. """
  2516. ...
  2517. @overload
  2518. def bitwise_left_shift(self, other: Union[Number, _complex]) -> Tensor:
  2519. r"""
  2520. bitwise_left_shift(other) -> Tensor
  2521. See :func:`torch.bitwise_left_shift`
  2522. """
  2523. ...
  2524. @overload
  2525. def bitwise_left_shift_(self, other: Tensor) -> Tensor:
  2526. r"""
  2527. bitwise_left_shift_(other) -> Tensor
  2528. In-place version of :meth:`~Tensor.bitwise_left_shift`
  2529. """
  2530. ...
  2531. @overload
  2532. def bitwise_left_shift_(self, other: Union[Number, _complex]) -> Tensor:
  2533. r"""
  2534. bitwise_left_shift_(other) -> Tensor
  2535. In-place version of :meth:`~Tensor.bitwise_left_shift`
  2536. """
  2537. ...
  2538. def bitwise_not(self) -> Tensor:
  2539. r"""
  2540. bitwise_not() -> Tensor
  2541. See :func:`torch.bitwise_not`
  2542. """
  2543. ...
  2544. def bitwise_not_(self) -> Tensor:
  2545. r"""
  2546. bitwise_not_() -> Tensor
  2547. In-place version of :meth:`~Tensor.bitwise_not`
  2548. """
  2549. ...
  2550. @overload
  2551. def bitwise_or(self, other: Tensor) -> Tensor:
  2552. r"""
  2553. bitwise_or() -> Tensor
  2554. See :func:`torch.bitwise_or`
  2555. """
  2556. ...
  2557. @overload
  2558. def bitwise_or(self, other: Union[Number, _complex]) -> Tensor:
  2559. r"""
  2560. bitwise_or() -> Tensor
  2561. See :func:`torch.bitwise_or`
  2562. """
  2563. ...
  2564. @overload
  2565. def bitwise_or_(self, other: Tensor) -> Tensor:
  2566. r"""
  2567. bitwise_or_() -> Tensor
  2568. In-place version of :meth:`~Tensor.bitwise_or`
  2569. """
  2570. ...
  2571. @overload
  2572. def bitwise_or_(self, other: Union[Number, _complex]) -> Tensor:
  2573. r"""
  2574. bitwise_or_() -> Tensor
  2575. In-place version of :meth:`~Tensor.bitwise_or`
  2576. """
  2577. ...
  2578. @overload
  2579. def bitwise_right_shift(self, other: Tensor) -> Tensor:
  2580. r"""
  2581. bitwise_right_shift(other) -> Tensor
  2582. See :func:`torch.bitwise_right_shift`
  2583. """
  2584. ...
  2585. @overload
  2586. def bitwise_right_shift(self, other: Union[Number, _complex]) -> Tensor:
  2587. r"""
  2588. bitwise_right_shift(other) -> Tensor
  2589. See :func:`torch.bitwise_right_shift`
  2590. """
  2591. ...
  2592. @overload
  2593. def bitwise_right_shift_(self, other: Tensor) -> Tensor:
  2594. r"""
  2595. bitwise_right_shift_(other) -> Tensor
  2596. In-place version of :meth:`~Tensor.bitwise_right_shift`
  2597. """
  2598. ...
  2599. @overload
  2600. def bitwise_right_shift_(self, other: Union[Number, _complex]) -> Tensor:
  2601. r"""
  2602. bitwise_right_shift_(other) -> Tensor
  2603. In-place version of :meth:`~Tensor.bitwise_right_shift`
  2604. """
  2605. ...
  2606. @overload
  2607. def bitwise_xor(self, other: Tensor) -> Tensor:
  2608. r"""
  2609. bitwise_xor() -> Tensor
  2610. See :func:`torch.bitwise_xor`
  2611. """
  2612. ...
  2613. @overload
  2614. def bitwise_xor(self, other: Union[Number, _complex]) -> Tensor:
  2615. r"""
  2616. bitwise_xor() -> Tensor
  2617. See :func:`torch.bitwise_xor`
  2618. """
  2619. ...
  2620. @overload
  2621. def bitwise_xor_(self, other: Tensor) -> Tensor:
  2622. r"""
  2623. bitwise_xor_() -> Tensor
  2624. In-place version of :meth:`~Tensor.bitwise_xor`
  2625. """
  2626. ...
  2627. @overload
  2628. def bitwise_xor_(self, other: Union[Number, _complex]) -> Tensor:
  2629. r"""
  2630. bitwise_xor_() -> Tensor
  2631. In-place version of :meth:`~Tensor.bitwise_xor`
  2632. """
  2633. ...
  2634. def bmm(self, mat2: Tensor) -> Tensor:
  2635. r"""
  2636. bmm(batch2) -> Tensor
  2637. See :func:`torch.bmm`
  2638. """
  2639. ...
  2640. def bool(self) -> Tensor:
  2641. r"""
  2642. bool(memory_format=torch.preserve_format) -> Tensor
  2643. ``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
  2644. Args:
  2645. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2646. returned Tensor. Default: ``torch.preserve_format``.
  2647. """
  2648. ...
  2649. @overload
  2650. def broadcast_to(self, size: Sequence[Union[_int, SymInt]]) -> Tensor:
  2651. r"""
  2652. broadcast_to(shape) -> Tensor
  2653. See :func:`torch.broadcast_to`.
  2654. """
  2655. ...
  2656. @overload
  2657. def broadcast_to(self, *size: _int) -> Tensor:
  2658. r"""
  2659. broadcast_to(shape) -> Tensor
  2660. See :func:`torch.broadcast_to`.
  2661. """
  2662. ...
  2663. def byte(self) -> Tensor:
  2664. r"""
  2665. byte(memory_format=torch.preserve_format) -> Tensor
  2666. ``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
  2667. Args:
  2668. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2669. returned Tensor. Default: ``torch.preserve_format``.
  2670. """
  2671. ...
  2672. def cauchy_(self, median: _float = 0, sigma: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
  2673. r"""
  2674. cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
  2675. Fills the tensor with numbers drawn from the Cauchy distribution:
  2676. .. math::
  2677. f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
  2678. .. note::
  2679. Sigma (:math:`\sigma`) is used to denote the scale parameter in Cauchy distribution.
  2680. """
  2681. ...
  2682. def ccol_indices(self) -> Tensor: ...
  2683. def ceil(self) -> Tensor:
  2684. r"""
  2685. ceil() -> Tensor
  2686. See :func:`torch.ceil`
  2687. """
  2688. ...
  2689. def ceil_(self) -> Tensor:
  2690. r"""
  2691. ceil_() -> Tensor
  2692. In-place version of :meth:`~Tensor.ceil`
  2693. """
  2694. ...
  2695. def chalf(self, *, memory_format: Optional[memory_format] = None) -> Tensor:
  2696. r"""
  2697. chalf(memory_format=torch.preserve_format) -> Tensor
  2698. ``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
  2699. Args:
  2700. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2701. returned Tensor. Default: ``torch.preserve_format``.
  2702. """
  2703. ...
  2704. def char(self) -> Tensor:
  2705. r"""
  2706. char(memory_format=torch.preserve_format) -> Tensor
  2707. ``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
  2708. Args:
  2709. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2710. returned Tensor. Default: ``torch.preserve_format``.
  2711. """
  2712. ...
  2713. def cholesky(self, upper: _bool = False) -> Tensor:
  2714. r"""
  2715. cholesky(upper=False) -> Tensor
  2716. See :func:`torch.cholesky`
  2717. """
  2718. ...
  2719. def cholesky_inverse(self, upper: _bool = False) -> Tensor:
  2720. r"""
  2721. cholesky_inverse(upper=False) -> Tensor
  2722. See :func:`torch.cholesky_inverse`
  2723. """
  2724. ...
  2725. def cholesky_solve(self, input2: Tensor, upper: _bool = False) -> Tensor:
  2726. r"""
  2727. cholesky_solve(input2, upper=False) -> Tensor
  2728. See :func:`torch.cholesky_solve`
  2729. """
  2730. ...
  2731. def chunk(self, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
  2732. r"""
  2733. chunk(chunks, dim=0) -> List of Tensors
  2734. See :func:`torch.chunk`
  2735. """
  2736. ...
  2737. @overload
  2738. def clamp(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
  2739. r"""
  2740. clamp(min=None, max=None) -> Tensor
  2741. See :func:`torch.clamp`
  2742. """
  2743. ...
  2744. @overload
  2745. def clamp(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
  2746. r"""
  2747. clamp(min=None, max=None) -> Tensor
  2748. See :func:`torch.clamp`
  2749. """
  2750. ...
  2751. @overload
  2752. def clamp_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
  2753. r"""
  2754. clamp_(min=None, max=None) -> Tensor
  2755. In-place version of :meth:`~Tensor.clamp`
  2756. """
  2757. ...
  2758. @overload
  2759. def clamp_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
  2760. r"""
  2761. clamp_(min=None, max=None) -> Tensor
  2762. In-place version of :meth:`~Tensor.clamp`
  2763. """
  2764. ...
  2765. @overload
  2766. def clamp_max(self, max: Tensor) -> Tensor: ...
  2767. @overload
  2768. def clamp_max(self, max: Union[Number, _complex]) -> Tensor: ...
  2769. @overload
  2770. def clamp_max_(self, max: Tensor) -> Tensor: ...
  2771. @overload
  2772. def clamp_max_(self, max: Union[Number, _complex]) -> Tensor: ...
  2773. @overload
  2774. def clamp_min(self, min: Tensor) -> Tensor: ...
  2775. @overload
  2776. def clamp_min(self, min: Union[Number, _complex]) -> Tensor: ...
  2777. @overload
  2778. def clamp_min_(self, min: Tensor) -> Tensor: ...
  2779. @overload
  2780. def clamp_min_(self, min: Union[Number, _complex]) -> Tensor: ...
  2781. @overload
  2782. def clip(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
  2783. r"""
  2784. clip(min=None, max=None) -> Tensor
  2785. Alias for :meth:`~Tensor.clamp`.
  2786. """
  2787. ...
  2788. @overload
  2789. def clip(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
  2790. r"""
  2791. clip(min=None, max=None) -> Tensor
  2792. Alias for :meth:`~Tensor.clamp`.
  2793. """
  2794. ...
  2795. @overload
  2796. def clip_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
  2797. r"""
  2798. clip_(min=None, max=None) -> Tensor
  2799. Alias for :meth:`~Tensor.clamp_`.
  2800. """
  2801. ...
  2802. @overload
  2803. def clip_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
  2804. r"""
  2805. clip_(min=None, max=None) -> Tensor
  2806. Alias for :meth:`~Tensor.clamp_`.
  2807. """
  2808. ...
  2809. def clone(self, *, memory_format: Optional[memory_format] = None) -> Tensor:
  2810. r"""
  2811. clone(*, memory_format=torch.preserve_format) -> Tensor
  2812. See :func:`torch.clone`
  2813. """
  2814. ...
  2815. def coalesce(self) -> Tensor:
  2816. r"""
  2817. coalesce() -> Tensor
  2818. Returns a coalesced copy of :attr:`self` if :attr:`self` is an
  2819. :ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
  2820. Returns :attr:`self` if :attr:`self` is a coalesced tensor.
  2821. .. warning::
  2822. Throws an error if :attr:`self` is not a sparse COO tensor.
  2823. """
  2824. ...
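# Example sketch (illustrative): coalescing sums values stored at duplicate indices.
# >>> i = torch.tensor([[0, 0], [1, 1]])
# >>> v = torch.tensor([3.0, 4.0])
# >>> torch.sparse_coo_tensor(i, v, (2, 2)).coalesce().values()
# tensor([7.])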
  2825. def col_indices(self) -> Tensor:
  2826. r"""
  2827. col_indices() -> IntTensor
  2828. Returns the tensor containing the column indices of the :attr:`self`
  2829. tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
  2830. The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
  2831. and of type ``int32`` or ``int64``. When using MKL routines such as sparse
  2832. matrix multiplication, it is necessary to use ``int32`` indexing in order
  2833. to avoid downcasting and potentially losing information.
  2834. Example::
  2835. >>> csr = torch.eye(5,5).to_sparse_csr()
  2836. >>> csr.col_indices()
  2837. tensor([0, 1, 2, 3, 4], dtype=torch.int32)
  2838. """
  2839. ...
  2840. def conj(self) -> Tensor:
  2841. r"""
  2842. conj() -> Tensor
  2843. See :func:`torch.conj`
  2844. """
  2845. ...
  2846. def conj_physical(self) -> Tensor:
  2847. r"""
  2848. conj_physical() -> Tensor
  2849. See :func:`torch.conj_physical`
  2850. """
  2851. ...
  2852. def conj_physical_(self) -> Tensor:
  2853. r"""
  2854. conj_physical_() -> Tensor
  2855. In-place version of :meth:`~Tensor.conj_physical`
  2856. """
  2857. ...
  2858. def contiguous(self, memory_format=torch.contiguous_format) -> Tensor:
  2859. r"""
  2860. contiguous(memory_format=torch.contiguous_format) -> Tensor
2861. Returns a tensor that is contiguous in memory and contains the same data as the
2862. :attr:`self` tensor. If the :attr:`self` tensor is already in the specified memory format,
2863. this function returns the :attr:`self` tensor.
  2864. Args:
  2865. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2866. returned Tensor. Default: ``torch.contiguous_format``.
  2867. """
  2868. ...
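# Example sketch (illustrative): a transposed view is usually non-contiguous.
# >>> x = torch.randn(2, 3).t()
# >>> x.is_contiguous()
# False
# >>> x.contiguous().is_contiguous()
# True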
  2869. def copy_(self, src: Tensor, non_blocking: _bool = False) -> Tensor:
  2870. r"""
  2871. copy_(src, non_blocking=False) -> Tensor
  2872. Copies the elements from :attr:`src` into :attr:`self` tensor and returns
  2873. :attr:`self`.
  2874. The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
  2875. with the :attr:`self` tensor. It may be of a different data type or reside on a
  2876. different device.
  2877. Args:
  2878. src (Tensor): the source tensor to copy from
  2879. non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
  2880. the copy may occur asynchronously with respect to the host. For other
  2881. cases, this argument has no effect.
  2882. """
  2883. ...
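# Example sketch (illustrative): the source is broadcast and cast to the destination dtype.
# >>> dst = torch.zeros(2, 3)
# >>> dst.copy_(torch.tensor([1, 2, 3]))
# tensor([[1., 2., 3.],
#         [1., 2., 3.]])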
  2884. @overload
  2885. def copysign(self, other: Tensor) -> Tensor:
  2886. r"""
  2887. copysign(other) -> Tensor
  2888. See :func:`torch.copysign`
  2889. """
  2890. ...
  2891. @overload
  2892. def copysign(self, other: Union[Number, _complex]) -> Tensor:
  2893. r"""
  2894. copysign(other) -> Tensor
  2895. See :func:`torch.copysign`
  2896. """
  2897. ...
  2898. @overload
  2899. def copysign_(self, other: Tensor) -> Tensor:
  2900. r"""
  2901. copysign_(other) -> Tensor
  2902. In-place version of :meth:`~Tensor.copysign`
  2903. """
  2904. ...
  2905. @overload
  2906. def copysign_(self, other: Union[Number, _complex]) -> Tensor:
  2907. r"""
  2908. copysign_(other) -> Tensor
  2909. In-place version of :meth:`~Tensor.copysign`
  2910. """
  2911. ...
  2912. def corrcoef(self) -> Tensor:
  2913. r"""
  2914. corrcoef() -> Tensor
  2915. See :func:`torch.corrcoef`
  2916. """
  2917. ...
  2918. def cos(self) -> Tensor:
  2919. r"""
  2920. cos() -> Tensor
  2921. See :func:`torch.cos`
  2922. """
  2923. ...
  2924. def cos_(self) -> Tensor:
  2925. r"""
  2926. cos_() -> Tensor
  2927. In-place version of :meth:`~Tensor.cos`
  2928. """
  2929. ...
  2930. def cosh(self) -> Tensor:
  2931. r"""
  2932. cosh() -> Tensor
  2933. See :func:`torch.cosh`
  2934. """
  2935. ...
  2936. def cosh_(self) -> Tensor:
  2937. r"""
  2938. cosh_() -> Tensor
  2939. In-place version of :meth:`~Tensor.cosh`
  2940. """
  2941. ...
  2942. @overload
  2943. def count_nonzero(self, dim: Optional[_int] = None) -> Tensor:
  2944. r"""
  2945. count_nonzero(dim=None) -> Tensor
  2946. See :func:`torch.count_nonzero`
  2947. """
  2948. ...
  2949. @overload
  2950. def count_nonzero(self, dim: _size) -> Tensor:
  2951. r"""
  2952. count_nonzero(dim=None) -> Tensor
  2953. See :func:`torch.count_nonzero`
  2954. """
  2955. ...
  2956. @overload
  2957. def count_nonzero(self, *dim: _int) -> Tensor:
  2958. r"""
  2959. count_nonzero(dim=None) -> Tensor
  2960. See :func:`torch.count_nonzero`
  2961. """
  2962. ...
  2963. def cov(self, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor:
  2964. r"""
  2965. cov(*, correction=1, fweights=None, aweights=None) -> Tensor
  2966. See :func:`torch.cov`
  2967. """
  2968. ...
  2969. def cpu(self, memory_format: torch.memory_format = torch.preserve_format) -> Tensor:
  2970. r"""
  2971. cpu(memory_format=torch.preserve_format) -> Tensor
  2972. Returns a copy of this object in CPU memory.
  2973. If this object is already in CPU memory and on the correct device,
  2974. then no copy is performed and the original object is returned.
  2975. Args:
  2976. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  2977. returned Tensor. Default: ``torch.preserve_format``.
  2978. """
  2979. ...
  2980. def cross(self, other: Tensor, dim: Optional[_int] = None) -> Tensor:
  2981. r"""
  2982. cross(other, dim=None) -> Tensor
  2983. See :func:`torch.cross`
  2984. """
  2985. ...
  2986. def crow_indices(self) -> Tensor:
  2987. r"""
  2988. crow_indices() -> IntTensor
  2989. Returns the tensor containing the compressed row indices of the :attr:`self`
  2990. tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
  2991. The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
  2992. and of type ``int32`` or ``int64``. When using MKL routines such as sparse
  2993. matrix multiplication, it is necessary to use ``int32`` indexing in order
  2994. to avoid downcasting and potentially losing information.
  2995. Example::
  2996. >>> csr = torch.eye(5,5).to_sparse_csr()
  2997. >>> csr.crow_indices()
  2998. tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
  2999. """
  3000. ...
  3001. def cuda(self, device: Optional[Union[_device, _int, str]] = None, non_blocking: _bool = False, memory_format: torch.memory_format = torch.preserve_format) -> Tensor:
  3002. r"""
  3003. cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
  3004. Returns a copy of this object in CUDA memory.
  3005. If this object is already in CUDA memory and on the correct device,
  3006. then no copy is performed and the original object is returned.
  3007. Args:
  3008. device (:class:`torch.device`): The destination GPU device.
  3009. Defaults to the current CUDA device.
  3010. non_blocking (bool): If ``True`` and the source is in pinned memory,
  3011. the copy will be asynchronous with respect to the host.
  3012. Otherwise, the argument has no effect. Default: ``False``.
  3013. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3014. returned Tensor. Default: ``torch.preserve_format``.
  3015. """
  3016. ...
  3017. @overload
  3018. def cummax(self, dim: _int) -> torch.return_types.cummax:
  3019. r"""
  3020. cummax(dim) -> (Tensor, Tensor)
  3021. See :func:`torch.cummax`
  3022. """
  3023. ...
  3024. @overload
  3025. def cummax(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummax:
  3026. r"""
  3027. cummax(dim) -> (Tensor, Tensor)
  3028. See :func:`torch.cummax`
  3029. """
  3030. ...
  3031. @overload
  3032. def cummin(self, dim: _int) -> torch.return_types.cummin:
  3033. r"""
  3034. cummin(dim) -> (Tensor, Tensor)
  3035. See :func:`torch.cummin`
  3036. """
  3037. ...
  3038. @overload
  3039. def cummin(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummin:
  3040. r"""
  3041. cummin(dim) -> (Tensor, Tensor)
  3042. See :func:`torch.cummin`
  3043. """
  3044. ...
  3045. @overload
  3046. def cumprod(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
  3047. r"""
  3048. cumprod(dim, dtype=None) -> Tensor
  3049. See :func:`torch.cumprod`
  3050. """
  3051. ...
  3052. @overload
  3053. def cumprod(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
  3054. r"""
  3055. cumprod(dim, dtype=None) -> Tensor
  3056. See :func:`torch.cumprod`
  3057. """
  3058. ...
  3059. @overload
  3060. def cumprod_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
  3061. r"""
  3062. cumprod_(dim, dtype=None) -> Tensor
  3063. In-place version of :meth:`~Tensor.cumprod`
  3064. """
  3065. ...
  3066. @overload
  3067. def cumprod_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
  3068. r"""
  3069. cumprod_(dim, dtype=None) -> Tensor
  3070. In-place version of :meth:`~Tensor.cumprod`
  3071. """
  3072. ...
  3073. @overload
  3074. def cumsum(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
  3075. r"""
  3076. cumsum(dim, dtype=None) -> Tensor
  3077. See :func:`torch.cumsum`
  3078. """
  3079. ...
  3080. @overload
  3081. def cumsum(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
  3082. r"""
  3083. cumsum(dim, dtype=None) -> Tensor
  3084. See :func:`torch.cumsum`
  3085. """
  3086. ...
  3087. @overload
  3088. def cumsum_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
  3089. r"""
  3090. cumsum_(dim, dtype=None) -> Tensor
  3091. In-place version of :meth:`~Tensor.cumsum`
  3092. """
  3093. ...
  3094. @overload
  3095. def cumsum_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
  3096. r"""
  3097. cumsum_(dim, dtype=None) -> Tensor
  3098. In-place version of :meth:`~Tensor.cumsum`
  3099. """
  3100. ...
  3101. def data_ptr(self) -> _int:
  3102. r"""
  3103. data_ptr() -> int
  3104. Returns the address of the first element of :attr:`self` tensor.
  3105. """
  3106. ...
  3107. def deg2rad(self) -> Tensor:
  3108. r"""
  3109. deg2rad() -> Tensor
  3110. See :func:`torch.deg2rad`
  3111. """
  3112. ...
  3113. def deg2rad_(self) -> Tensor:
  3114. r"""
  3115. deg2rad_() -> Tensor
  3116. In-place version of :meth:`~Tensor.deg2rad`
  3117. """
  3118. ...
  3119. def dense_dim(self) -> _int:
  3120. r"""
  3121. dense_dim() -> int
  3122. Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
  3123. .. note::
  3124. Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.
  3125. See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
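For instance (a small hybrid COO tensor built only for illustration)::
>>> i = torch.tensor([[0, 1]])
>>> v = torch.ones(2, 3)
>>> s = torch.sparse_coo_tensor(i, v, size=(4, 3))
>>> s.dense_dim()
1
>>> torch.zeros(2, 3).dense_dim()  # a strided tensor reports len(self.shape)
2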
  3126. """
  3127. ...
  3128. def dequantize(self) -> Tensor:
  3129. r"""
  3130. dequantize() -> Tensor
  3131. Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
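For instance, with a per-tensor quantized input (scale and zero point chosen only for illustration)::
>>> q = torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0]), scale=0.5, zero_point=2, dtype=torch.quint8)
>>> q.dequantize()
tensor([-1.,  0.,  1.])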
  3132. """
  3133. ...
  3134. def det(self) -> Tensor:
  3135. r"""
  3136. det() -> Tensor
  3137. See :func:`torch.det`
  3138. """
  3139. ...
  3140. def detach(self) -> Tensor: ...
  3141. def detach_(self) -> Tensor: ...
  3142. def diag(self, diagonal: _int = 0) -> Tensor:
  3143. r"""
  3144. diag(diagonal=0) -> Tensor
  3145. See :func:`torch.diag`
  3146. """
  3147. ...
  3148. def diag_embed(self, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor:
  3149. r"""
  3150. diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
  3151. See :func:`torch.diag_embed`
  3152. """
  3153. ...
  3154. def diagflat(self, offset: _int = 0) -> Tensor:
  3155. r"""
  3156. diagflat(offset=0) -> Tensor
  3157. See :func:`torch.diagflat`
  3158. """
  3159. ...
  3160. @overload
  3161. def diagonal(self, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor:
  3162. r"""
  3163. diagonal(offset=0, dim1=0, dim2=1) -> Tensor
  3164. See :func:`torch.diagonal`
  3165. """
  3166. ...
  3167. @overload
  3168. def diagonal(self, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
  3169. r"""
  3170. diagonal(offset=0, dim1=0, dim2=1) -> Tensor
  3171. See :func:`torch.diagonal`
  3172. """
  3173. ...
  3174. def diagonal_scatter(self, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
  3175. r"""
  3176. diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
  3177. See :func:`torch.diagonal_scatter`
  3178. """
  3179. ...
  3180. def diff(self, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None) -> Tensor:
  3181. r"""
  3182. diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
  3183. See :func:`torch.diff`
  3184. """
  3185. ...
  3186. def digamma(self) -> Tensor:
  3187. r"""
  3188. digamma() -> Tensor
  3189. See :func:`torch.digamma`
  3190. """
  3191. ...
  3192. def digamma_(self) -> Tensor:
  3193. r"""
  3194. digamma_() -> Tensor
  3195. In-place version of :meth:`~Tensor.digamma`
  3196. """
  3197. ...
  3198. def dim(self) -> _int:
  3199. r"""
  3200. dim() -> int
  3201. Returns the number of dimensions of :attr:`self` tensor.
  3202. """
  3203. ...
  3204. def dist(self, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor:
  3205. r"""
  3206. dist(other, p=2) -> Tensor
  3207. See :func:`torch.dist`
  3208. """
  3209. ...
  3210. def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor:
  3211. r"""
  3212. div(value, *, rounding_mode=None) -> Tensor
  3213. See :func:`torch.div`
  3214. """
  3215. ...
  3216. def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor:
  3217. r"""
  3218. div_(value, *, rounding_mode=None) -> Tensor
  3219. In-place version of :meth:`~Tensor.div`
  3220. """
  3221. ...
  3222. @overload
  3223. def divide(self, other: Tensor) -> Tensor:
  3224. r"""
  3225. divide(value, *, rounding_mode=None) -> Tensor
  3226. See :func:`torch.divide`
  3227. """
  3228. ...
  3229. @overload
  3230. def divide(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor:
  3231. r"""
  3232. divide(value, *, rounding_mode=None) -> Tensor
  3233. See :func:`torch.divide`
  3234. """
  3235. ...
  3236. @overload
  3237. def divide(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
  3238. r"""
  3239. divide(value, *, rounding_mode=None) -> Tensor
  3240. See :func:`torch.divide`
  3241. """
  3242. ...
  3243. @overload
  3244. def divide(self, other: Union[Number, _complex]) -> Tensor:
  3245. r"""
  3246. divide(value, *, rounding_mode=None) -> Tensor
  3247. See :func:`torch.divide`
  3248. """
  3249. ...
  3250. @overload
  3251. def divide_(self, other: Tensor) -> Tensor:
  3252. r"""
  3253. divide_(value, *, rounding_mode=None) -> Tensor
  3254. In-place version of :meth:`~Tensor.divide`
  3255. """
  3256. ...
  3257. @overload
  3258. def divide_(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor:
  3259. r"""
  3260. divide_(value, *, rounding_mode=None) -> Tensor
  3261. In-place version of :meth:`~Tensor.divide`
  3262. """
  3263. ...
  3264. @overload
  3265. def divide_(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
  3266. r"""
  3267. divide_(value, *, rounding_mode=None) -> Tensor
  3268. In-place version of :meth:`~Tensor.divide`
  3269. """
  3270. ...
  3271. @overload
  3272. def divide_(self, other: Union[Number, _complex]) -> Tensor:
  3273. r"""
  3274. divide_(value, *, rounding_mode=None) -> Tensor
  3275. In-place version of :meth:`~Tensor.divide`
  3276. """
  3277. ...
  3278. def dot(self, tensor: Tensor) -> Tensor:
  3279. r"""
  3280. dot(other) -> Tensor
  3281. See :func:`torch.dot`
  3282. """
  3283. ...
  3284. def double(self) -> Tensor:
  3285. r"""
  3286. double(memory_format=torch.preserve_format) -> Tensor
  3287. ``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
  3288. Args:
  3289. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3290. returned Tensor. Default: ``torch.preserve_format``.
  3291. """
  3292. ...
  3293. @overload
  3294. def dsplit(self, sections: _int) -> Tuple[Tensor, ...]:
  3295. r"""
  3296. dsplit(split_size_or_sections) -> List of Tensors
  3297. See :func:`torch.dsplit`
  3298. """
  3299. ...
  3300. @overload
  3301. def dsplit(self, indices: _size) -> Tuple[Tensor, ...]:
  3302. r"""
  3303. dsplit(split_size_or_sections) -> List of Tensors
  3304. See :func:`torch.dsplit`
  3305. """
  3306. ...
  3307. @overload
  3308. def dsplit(self, *indices: _int) -> Tuple[Tensor, ...]:
  3309. r"""
  3310. dsplit(split_size_or_sections) -> List of Tensors
  3311. See :func:`torch.dsplit`
  3312. """
  3313. ...
  3314. def element_size(self) -> _int:
  3315. r"""
  3316. element_size() -> int
  3317. Returns the size in bytes of an individual element.
  3318. Example::
  3319. >>> torch.tensor([]).element_size()
  3320. 4
  3321. >>> torch.tensor([], dtype=torch.uint8).element_size()
  3322. 1
  3323. """
  3324. ...
  3325. @overload
  3326. def eq(self, other: Tensor) -> Tensor:
  3327. r"""
  3328. eq(other) -> Tensor
  3329. See :func:`torch.eq`
  3330. """
  3331. ...
  3332. @overload
  3333. def eq(self, other: Union[Number, _complex]) -> Tensor:
  3334. r"""
  3335. eq(other) -> Tensor
  3336. See :func:`torch.eq`
  3337. """
  3338. ...
  3339. @overload
  3340. def eq_(self, other: Tensor) -> Tensor:
  3341. r"""
  3342. eq_(other) -> Tensor
  3343. In-place version of :meth:`~Tensor.eq`
  3344. """
  3345. ...
  3346. @overload
  3347. def eq_(self, other: Union[Number, _complex]) -> Tensor:
  3348. r"""
  3349. eq_(other) -> Tensor
  3350. In-place version of :meth:`~Tensor.eq`
  3351. """
  3352. ...
  3353. def equal(self, other: Tensor) -> _bool:
  3354. r"""
  3355. equal(other) -> bool
  3356. See :func:`torch.equal`
  3357. """
  3358. ...
  3359. def erf(self) -> Tensor:
  3360. r"""
  3361. erf() -> Tensor
  3362. See :func:`torch.erf`
  3363. """
  3364. ...
  3365. def erf_(self) -> Tensor:
  3366. r"""
  3367. erf_() -> Tensor
  3368. In-place version of :meth:`~Tensor.erf`
  3369. """
  3370. ...
  3371. def erfc(self) -> Tensor:
  3372. r"""
  3373. erfc() -> Tensor
  3374. See :func:`torch.erfc`
  3375. """
  3376. ...
  3377. def erfc_(self) -> Tensor:
  3378. r"""
  3379. erfc_() -> Tensor
  3380. In-place version of :meth:`~Tensor.erfc`
  3381. """
  3382. ...
  3383. def erfinv(self) -> Tensor:
  3384. r"""
  3385. erfinv() -> Tensor
  3386. See :func:`torch.erfinv`
  3387. """
  3388. ...
  3389. def erfinv_(self) -> Tensor:
  3390. r"""
  3391. erfinv_() -> Tensor
  3392. In-place version of :meth:`~Tensor.erfinv`
  3393. """
  3394. ...
  3395. def exp(self) -> Tensor:
  3396. r"""
  3397. exp() -> Tensor
  3398. See :func:`torch.exp`
  3399. """
  3400. ...
  3401. def exp2(self) -> Tensor:
  3402. r"""
  3403. exp2() -> Tensor
  3404. See :func:`torch.exp2`
  3405. """
  3406. ...
  3407. def exp2_(self) -> Tensor:
  3408. r"""
  3409. exp2_() -> Tensor
  3410. In-place version of :meth:`~Tensor.exp2`
  3411. """
  3412. ...
  3413. def exp_(self) -> Tensor:
  3414. r"""
  3415. exp_() -> Tensor
  3416. In-place version of :meth:`~Tensor.exp`
  3417. """
  3418. ...
  3419. @overload
  3420. def expand(self, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False) -> Tensor:
  3421. r"""
  3422. expand(*sizes) -> Tensor
  3423. Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
  3424. to a larger size.
  3425. Passing -1 as the size for a dimension means not changing the size of
  3426. that dimension.
A tensor can also be expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.
  3430. Expanding a tensor does not allocate new memory, but only creates a
  3431. new view on the existing tensor where a dimension of size one is
  3432. expanded to a larger size by setting the ``stride`` to 0. Any dimension
  3433. of size 1 can be expanded to an arbitrary value without allocating new
  3434. memory.
  3435. Args:
  3436. *sizes (torch.Size or int...): the desired expanded size
  3437. .. warning::
  3438. More than one element of an expanded tensor may refer to a single
  3439. memory location. As a result, in-place operations (especially ones that
  3440. are vectorized) may result in incorrect behavior. If you need to write
  3441. to the tensors, please clone them first.
  3442. Example::
  3443. >>> x = torch.tensor([[1], [2], [3]])
  3444. >>> x.size()
  3445. torch.Size([3, 1])
  3446. >>> x.expand(3, 4)
  3447. tensor([[ 1, 1, 1, 1],
  3448. [ 2, 2, 2, 2],
  3449. [ 3, 3, 3, 3]])
  3450. >>> x.expand(-1, 4) # -1 means not changing the size of that dimension
  3451. tensor([[ 1, 1, 1, 1],
  3452. [ 2, 2, 2, 2],
  3453. [ 3, 3, 3, 3]])
  3454. """
  3455. ...
  3456. @overload
  3457. def expand(self, *size: _int, implicit: _bool = False) -> Tensor:
  3458. r"""
  3459. expand(*sizes) -> Tensor
  3460. Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
  3461. to a larger size.
  3462. Passing -1 as the size for a dimension means not changing the size of
  3463. that dimension.
A tensor can also be expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.
  3467. Expanding a tensor does not allocate new memory, but only creates a
  3468. new view on the existing tensor where a dimension of size one is
  3469. expanded to a larger size by setting the ``stride`` to 0. Any dimension
  3470. of size 1 can be expanded to an arbitrary value without allocating new
  3471. memory.
  3472. Args:
  3473. *sizes (torch.Size or int...): the desired expanded size
  3474. .. warning::
  3475. More than one element of an expanded tensor may refer to a single
  3476. memory location. As a result, in-place operations (especially ones that
  3477. are vectorized) may result in incorrect behavior. If you need to write
  3478. to the tensors, please clone them first.
  3479. Example::
  3480. >>> x = torch.tensor([[1], [2], [3]])
  3481. >>> x.size()
  3482. torch.Size([3, 1])
  3483. >>> x.expand(3, 4)
  3484. tensor([[ 1, 1, 1, 1],
  3485. [ 2, 2, 2, 2],
  3486. [ 3, 3, 3, 3]])
  3487. >>> x.expand(-1, 4) # -1 means not changing the size of that dimension
  3488. tensor([[ 1, 1, 1, 1],
  3489. [ 2, 2, 2, 2],
  3490. [ 3, 3, 3, 3]])
  3491. """
  3492. ...
  3493. def expand_as(self, other: Tensor) -> Tensor:
  3494. r"""
  3495. expand_as(other) -> Tensor
  3496. Expand this tensor to the same size as :attr:`other`.
  3497. ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
  3498. Please see :meth:`~Tensor.expand` for more information about ``expand``.
  3499. Args:
  3500. other (:class:`torch.Tensor`): The result tensor has the same size
  3501. as :attr:`other`.
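For instance, expanding a 3x1 tensor to the size of a 3x4 tensor::
>>> x = torch.tensor([[1], [2], [3]])
>>> y = torch.zeros(3, 4)
>>> x.expand_as(y)
tensor([[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]])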
  3502. """
  3503. ...
  3504. def expm1(self) -> Tensor:
  3505. r"""
  3506. expm1() -> Tensor
  3507. See :func:`torch.expm1`
  3508. """
  3509. ...
  3510. def expm1_(self) -> Tensor:
  3511. r"""
  3512. expm1_() -> Tensor
  3513. In-place version of :meth:`~Tensor.expm1`
  3514. """
  3515. ...
  3516. def exponential_(self, lambd: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
  3517. r"""
  3518. exponential_(lambd=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the exponential distribution, whose PDF (probability density function) is:
.. math::
f(x) = \lambda e^{-\lambda x}, x > 0
.. note::
In probability theory, the exponential distribution is supported on the interval [0, :math:`\infty`) (i.e., :math:`x \ge 0`),
implying that zero can be sampled from the exponential distribution.
However, :func:`torch.Tensor.exponential_` does not sample zero,
which means that its actual support is the interval (0, :math:`\infty`).
Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\infty`) and can sample zero.
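A quick sanity check of the support (samples are random, but always strictly positive)::
>>> x = torch.empty(5).exponential_(lambd=2.0)
>>> bool((x > 0).all())
True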
  3528. """
  3529. ...
  3530. @overload
  3531. def fill_(self, value: Tensor) -> Tensor:
  3532. r"""
  3533. fill_(value) -> Tensor
  3534. Fills :attr:`self` tensor with the specified value.
  3535. """
  3536. ...
  3537. @overload
  3538. def fill_(self, value: Union[Number, _complex]) -> Tensor:
  3539. r"""
  3540. fill_(value) -> Tensor
  3541. Fills :attr:`self` tensor with the specified value.
  3542. """
  3543. ...
  3544. def fill_diagonal_(self, fill_value: Union[Number, _complex], wrap: _bool = False) -> Tensor:
  3545. r"""
  3546. fill_diagonal_(fill_value, wrap=False) -> Tensor
Fills the main diagonal of a tensor that has at least 2 dimensions.
When dims > 2, all dimensions of the input must be of equal length.
This function modifies the input tensor in-place and returns the input tensor.
Arguments:
fill_value (Scalar): the fill value
wrap (bool): whether the diagonal is 'wrapped' after N columns for tall matrices.
  3553. Example::
  3554. >>> a = torch.zeros(3, 3)
  3555. >>> a.fill_diagonal_(5)
  3556. tensor([[5., 0., 0.],
  3557. [0., 5., 0.],
  3558. [0., 0., 5.]])
  3559. >>> b = torch.zeros(7, 3)
  3560. >>> b.fill_diagonal_(5)
  3561. tensor([[5., 0., 0.],
  3562. [0., 5., 0.],
  3563. [0., 0., 5.],
  3564. [0., 0., 0.],
  3565. [0., 0., 0.],
  3566. [0., 0., 0.],
  3567. [0., 0., 0.]])
  3568. >>> c = torch.zeros(7, 3)
  3569. >>> c.fill_diagonal_(5, wrap=True)
  3570. tensor([[5., 0., 0.],
  3571. [0., 5., 0.],
  3572. [0., 0., 5.],
  3573. [0., 0., 0.],
  3574. [5., 0., 0.],
  3575. [0., 5., 0.],
  3576. [0., 0., 5.]])
  3577. """
  3578. ...
  3579. def fix(self) -> Tensor:
  3580. r"""
  3581. fix() -> Tensor
  3582. See :func:`torch.fix`.
  3583. """
  3584. ...
  3585. def fix_(self) -> Tensor:
  3586. r"""
  3587. fix_() -> Tensor
  3588. In-place version of :meth:`~Tensor.fix`
  3589. """
  3590. ...
  3591. @overload
  3592. def flatten(self, start_dim: _int = 0, end_dim: _int = -1) -> Tensor:
  3593. r"""
  3594. flatten(start_dim=0, end_dim=-1) -> Tensor
  3595. See :func:`torch.flatten`
  3596. """
  3597. ...
  3598. @overload
  3599. def flatten(self, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor:
  3600. r"""
  3601. flatten(start_dim=0, end_dim=-1) -> Tensor
  3602. See :func:`torch.flatten`
  3603. """
  3604. ...
  3605. @overload
  3606. def flatten(self, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor:
  3607. r"""
  3608. flatten(start_dim=0, end_dim=-1) -> Tensor
  3609. See :func:`torch.flatten`
  3610. """
  3611. ...
  3612. @overload
  3613. def flatten(self, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor:
  3614. r"""
  3615. flatten(start_dim=0, end_dim=-1) -> Tensor
  3616. See :func:`torch.flatten`
  3617. """
  3618. ...
  3619. @overload
  3620. def flip(self, dims: _size) -> Tensor:
  3621. r"""
  3622. flip(dims) -> Tensor
  3623. See :func:`torch.flip`
  3624. """
  3625. ...
  3626. @overload
  3627. def flip(self, *dims: _int) -> Tensor:
  3628. r"""
  3629. flip(dims) -> Tensor
  3630. See :func:`torch.flip`
  3631. """
  3632. ...
  3633. def fliplr(self) -> Tensor:
  3634. r"""
  3635. fliplr() -> Tensor
  3636. See :func:`torch.fliplr`
  3637. """
  3638. ...
  3639. def flipud(self) -> Tensor:
  3640. r"""
  3641. flipud() -> Tensor
  3642. See :func:`torch.flipud`
  3643. """
  3644. ...
  3645. def float(self) -> Tensor:
  3646. r"""
  3647. float(memory_format=torch.preserve_format) -> Tensor
  3648. ``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
  3649. Args:
  3650. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3651. returned Tensor. Default: ``torch.preserve_format``.
  3652. """
  3653. ...
  3654. @overload
  3655. def float_power(self, exponent: Tensor) -> Tensor:
  3656. r"""
  3657. float_power(exponent) -> Tensor
  3658. See :func:`torch.float_power`
  3659. """
  3660. ...
  3661. @overload
  3662. def float_power(self, exponent: Union[Number, _complex]) -> Tensor:
  3663. r"""
  3664. float_power(exponent) -> Tensor
  3665. See :func:`torch.float_power`
  3666. """
  3667. ...
  3668. @overload
  3669. def float_power_(self, exponent: Tensor) -> Tensor:
  3670. r"""
  3671. float_power_(exponent) -> Tensor
  3672. In-place version of :meth:`~Tensor.float_power`
  3673. """
  3674. ...
  3675. @overload
  3676. def float_power_(self, exponent: Union[Number, _complex]) -> Tensor:
  3677. r"""
  3678. float_power_(exponent) -> Tensor
  3679. In-place version of :meth:`~Tensor.float_power`
  3680. """
  3681. ...
  3682. def floor(self) -> Tensor:
  3683. r"""
  3684. floor() -> Tensor
  3685. See :func:`torch.floor`
  3686. """
  3687. ...
  3688. def floor_(self) -> Tensor:
  3689. r"""
  3690. floor_() -> Tensor
  3691. In-place version of :meth:`~Tensor.floor`
  3692. """
  3693. ...
  3694. def floor_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor:
  3695. r"""
  3696. floor_divide(value) -> Tensor
  3697. See :func:`torch.floor_divide`
  3698. """
  3699. ...
  3700. def floor_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor:
  3701. r"""
  3702. floor_divide_(value) -> Tensor
  3703. In-place version of :meth:`~Tensor.floor_divide`
  3704. """
  3705. ...
  3706. def fmax(self, other: Tensor) -> Tensor:
  3707. r"""
  3708. fmax(other) -> Tensor
  3709. See :func:`torch.fmax`
  3710. """
  3711. ...
  3712. def fmin(self, other: Tensor) -> Tensor:
  3713. r"""
  3714. fmin(other) -> Tensor
  3715. See :func:`torch.fmin`
  3716. """
  3717. ...
  3718. @overload
  3719. def fmod(self, other: Tensor) -> Tensor:
  3720. r"""
  3721. fmod(divisor) -> Tensor
  3722. See :func:`torch.fmod`
  3723. """
  3724. ...
  3725. @overload
  3726. def fmod(self, other: Union[Number, _complex]) -> Tensor:
  3727. r"""
  3728. fmod(divisor) -> Tensor
  3729. See :func:`torch.fmod`
  3730. """
  3731. ...
  3732. @overload
  3733. def fmod_(self, other: Tensor) -> Tensor:
  3734. r"""
  3735. fmod_(divisor) -> Tensor
  3736. In-place version of :meth:`~Tensor.fmod`
  3737. """
  3738. ...
  3739. @overload
  3740. def fmod_(self, other: Union[Number, _complex]) -> Tensor:
  3741. r"""
  3742. fmod_(divisor) -> Tensor
  3743. In-place version of :meth:`~Tensor.fmod`
  3744. """
  3745. ...
  3746. def frac(self) -> Tensor:
  3747. r"""
  3748. frac() -> Tensor
  3749. See :func:`torch.frac`
  3750. """
  3751. ...
  3752. def frac_(self) -> Tensor:
  3753. r"""
  3754. frac_() -> Tensor
  3755. In-place version of :meth:`~Tensor.frac`
  3756. """
  3757. ...
  3758. def frexp(self) -> torch.return_types.frexp:
  3759. r"""
  3760. frexp(input) -> (Tensor mantissa, Tensor exponent)
  3761. See :func:`torch.frexp`
  3762. """
  3763. ...
  3764. @overload
  3765. def gather(self, dim: _int, index: Tensor, *, sparse_grad: _bool = False) -> Tensor:
  3766. r"""
  3767. gather(dim, index) -> Tensor
  3768. See :func:`torch.gather`
  3769. """
  3770. ...
  3771. @overload
  3772. def gather(self, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False) -> Tensor:
  3773. r"""
  3774. gather(dim, index) -> Tensor
  3775. See :func:`torch.gather`
  3776. """
  3777. ...
  3778. def gcd(self, other: Tensor) -> Tensor:
  3779. r"""
  3780. gcd(other) -> Tensor
  3781. See :func:`torch.gcd`
  3782. """
  3783. ...
  3784. def gcd_(self, other: Tensor) -> Tensor:
  3785. r"""
  3786. gcd_(other) -> Tensor
  3787. In-place version of :meth:`~Tensor.gcd`
  3788. """
  3789. ...
  3790. @overload
  3791. def ge(self, other: Tensor) -> Tensor:
  3792. r"""
  3793. ge(other) -> Tensor
  3794. See :func:`torch.ge`.
  3795. """
  3796. ...
  3797. @overload
  3798. def ge(self, other: Union[Number, _complex]) -> Tensor:
  3799. r"""
  3800. ge(other) -> Tensor
  3801. See :func:`torch.ge`.
  3802. """
  3803. ...
  3804. @overload
  3805. def ge_(self, other: Tensor) -> Tensor:
  3806. r"""
  3807. ge_(other) -> Tensor
  3808. In-place version of :meth:`~Tensor.ge`.
  3809. """
  3810. ...
  3811. @overload
  3812. def ge_(self, other: Union[Number, _complex]) -> Tensor:
  3813. r"""
  3814. ge_(other) -> Tensor
  3815. In-place version of :meth:`~Tensor.ge`.
  3816. """
  3817. ...
  3818. def geometric_(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
  3819. r"""
  3820. geometric_(p, *, generator=None) -> Tensor
  3821. Fills :attr:`self` tensor with elements drawn from the geometric distribution:
  3822. .. math::
  3823. P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...
  3824. .. note::
:func:`torch.Tensor.geometric_` treats the `k`-th trial as the first success and hence draws samples in :math:`\{1, 2, \ldots\}`, whereas
:func:`torch.distributions.geometric.Geometric` treats the :math:`(k+1)`-th trial as the first success
and hence draws samples in :math:`\{0, 1, \ldots\}`.
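A quick sanity check of the support (samples are random, but always at least 1)::
>>> x = torch.empty(5).geometric_(p=0.5)
>>> bool((x >= 1).all())
True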
  3828. """
  3829. ...
  3830. def geqrf(self) -> torch.return_types.geqrf:
  3831. r"""
  3832. geqrf() -> (Tensor, Tensor)
  3833. See :func:`torch.geqrf`
  3834. """
  3835. ...
  3836. def ger(self, vec2: Tensor) -> Tensor:
  3837. r"""
  3838. ger(vec2) -> Tensor
  3839. See :func:`torch.ger`
  3840. """
  3841. ...
  3842. def get_device(self) -> _int:
  3843. r"""
  3844. get_device() -> Device ordinal (Integer)
  3845. For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
  3846. For CPU tensors, this function returns `-1`.
  3847. Example::
  3848. >>> x = torch.randn(3, 4, 5, device='cuda:0')
  3849. >>> x.get_device()
  3850. 0
  3851. >>> x.cpu().get_device()
  3852. -1
  3853. """
  3854. ...
  3855. @overload
  3856. def greater(self, other: Tensor) -> Tensor:
  3857. r"""
  3858. greater(other) -> Tensor
  3859. See :func:`torch.greater`.
  3860. """
  3861. ...
  3862. @overload
  3863. def greater(self, other: Union[Number, _complex]) -> Tensor:
  3864. r"""
  3865. greater(other) -> Tensor
  3866. See :func:`torch.greater`.
  3867. """
  3868. ...
  3869. @overload
  3870. def greater_(self, other: Tensor) -> Tensor:
  3871. r"""
  3872. greater_(other) -> Tensor
  3873. In-place version of :meth:`~Tensor.greater`.
  3874. """
  3875. ...
  3876. @overload
  3877. def greater_(self, other: Union[Number, _complex]) -> Tensor:
  3878. r"""
  3879. greater_(other) -> Tensor
  3880. In-place version of :meth:`~Tensor.greater`.
  3881. """
  3882. ...
  3883. @overload
  3884. def greater_equal(self, other: Tensor) -> Tensor:
  3885. r"""
  3886. greater_equal(other) -> Tensor
  3887. See :func:`torch.greater_equal`.
  3888. """
  3889. ...
  3890. @overload
  3891. def greater_equal(self, other: Union[Number, _complex]) -> Tensor:
  3892. r"""
  3893. greater_equal(other) -> Tensor
  3894. See :func:`torch.greater_equal`.
  3895. """
  3896. ...
  3897. @overload
  3898. def greater_equal_(self, other: Tensor) -> Tensor:
  3899. r"""
  3900. greater_equal_(other) -> Tensor
  3901. In-place version of :meth:`~Tensor.greater_equal`.
  3902. """
  3903. ...
  3904. @overload
  3905. def greater_equal_(self, other: Union[Number, _complex]) -> Tensor:
  3906. r"""
  3907. greater_equal_(other) -> Tensor
  3908. In-place version of :meth:`~Tensor.greater_equal`.
  3909. """
  3910. ...
  3911. @overload
  3912. def gt(self, other: Tensor) -> Tensor:
  3913. r"""
  3914. gt(other) -> Tensor
  3915. See :func:`torch.gt`.
  3916. """
  3917. ...
  3918. @overload
  3919. def gt(self, other: Union[Number, _complex]) -> Tensor:
  3920. r"""
  3921. gt(other) -> Tensor
  3922. See :func:`torch.gt`.
  3923. """
  3924. ...
  3925. @overload
  3926. def gt_(self, other: Tensor) -> Tensor:
  3927. r"""
  3928. gt_(other) -> Tensor
  3929. In-place version of :meth:`~Tensor.gt`.
  3930. """
  3931. ...
  3932. @overload
  3933. def gt_(self, other: Union[Number, _complex]) -> Tensor:
  3934. r"""
  3935. gt_(other) -> Tensor
  3936. In-place version of :meth:`~Tensor.gt`.
  3937. """
  3938. ...
  3939. def half(self) -> Tensor:
  3940. r"""
  3941. half(memory_format=torch.preserve_format) -> Tensor
  3942. ``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
  3943. Args:
  3944. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3945. returned Tensor. Default: ``torch.preserve_format``.
  3946. """
  3947. ...
  3948. def hardshrink(self, lambd: Union[Number, _complex] = 0.5) -> Tensor:
  3949. r"""
  3950. hardshrink(lambd=0.5) -> Tensor
  3951. See :func:`torch.nn.functional.hardshrink`
  3952. """
  3953. ...
  3954. def has_names(self) -> _bool:
  3955. r"""
  3956. Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
  3957. """
  3958. ...
  3959. def heaviside(self, values: Tensor) -> Tensor:
  3960. r"""
  3961. heaviside(values) -> Tensor
  3962. See :func:`torch.heaviside`
  3963. """
  3964. ...
  3965. def heaviside_(self, values: Tensor) -> Tensor:
  3966. r"""
  3967. heaviside_(values) -> Tensor
  3968. In-place version of :meth:`~Tensor.heaviside`
  3969. """
  3970. ...
  3971. def histc(self, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0) -> Tensor:
  3972. r"""
  3973. histc(bins=100, min=0, max=0) -> Tensor
  3974. See :func:`torch.histc`
  3975. """
  3976. ...
  3977. @overload
  3978. def histogram(self, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram:
  3979. r"""
  3980. histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
  3981. See :func:`torch.histogram`
  3982. """
  3983. ...
  3984. @overload
  3985. def histogram(self, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram:
  3986. r"""
  3987. histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
  3988. See :func:`torch.histogram`
  3989. """
  3990. ...
  3991. @overload
  3992. def hsplit(self, sections: _int) -> Tuple[Tensor, ...]:
  3993. r"""
  3994. hsplit(split_size_or_sections) -> List of Tensors
  3995. See :func:`torch.hsplit`
  3996. """
  3997. ...
  3998. @overload
  3999. def hsplit(self, indices: _size) -> Tuple[Tensor, ...]:
  4000. r"""
  4001. hsplit(split_size_or_sections) -> List of Tensors
  4002. See :func:`torch.hsplit`
  4003. """
  4004. ...
  4005. @overload
  4006. def hsplit(self, *indices: _int) -> Tuple[Tensor, ...]:
  4007. r"""
  4008. hsplit(split_size_or_sections) -> List of Tensors
  4009. See :func:`torch.hsplit`
  4010. """
  4011. ...
  4012. def hypot(self, other: Tensor) -> Tensor:
  4013. r"""
  4014. hypot(other) -> Tensor
  4015. See :func:`torch.hypot`
  4016. """
  4017. ...
  4018. def hypot_(self, other: Tensor) -> Tensor:
  4019. r"""
  4020. hypot_(other) -> Tensor
  4021. In-place version of :meth:`~Tensor.hypot`
  4022. """
  4023. ...
  4024. def i0(self) -> Tensor:
  4025. r"""
  4026. i0() -> Tensor
  4027. See :func:`torch.i0`
  4028. """
  4029. ...
  4030. def i0_(self) -> Tensor:
  4031. r"""
  4032. i0_() -> Tensor
  4033. In-place version of :meth:`~Tensor.i0`
  4034. """
  4035. ...
  4036. def igamma(self, other: Tensor) -> Tensor:
  4037. r"""
  4038. igamma(other) -> Tensor
  4039. See :func:`torch.igamma`
  4040. """
  4041. ...
  4042. def igamma_(self, other: Tensor) -> Tensor:
  4043. r"""
  4044. igamma_(other) -> Tensor
  4045. In-place version of :meth:`~Tensor.igamma`
  4046. """
  4047. ...
  4048. def igammac(self, other: Tensor) -> Tensor:
  4049. r"""
  4050. igammac(other) -> Tensor
  4051. See :func:`torch.igammac`
  4052. """
  4053. ...
  4054. def igammac_(self, other: Tensor) -> Tensor:
  4055. r"""
  4056. igammac_(other) -> Tensor
  4057. In-place version of :meth:`~Tensor.igammac`
  4058. """
  4059. ...
  4060. @overload
  4061. def index_add(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
  4062. r"""
  4063. index_add(dim, index, source, *, alpha=1) -> Tensor
  4064. Out-of-place version of :meth:`torch.Tensor.index_add_`.
  4065. """
  4066. ...
  4067. @overload
  4068. def index_add(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
  4069. r"""
  4070. index_add(dim, index, source, *, alpha=1) -> Tensor
  4071. Out-of-place version of :meth:`torch.Tensor.index_add_`.
  4072. """
  4073. ...
  4074. def index_add_(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
  4075. r"""
  4076. index_add_(dim, index, source, *, alpha=1) -> Tensor
  4077. Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
  4078. tensor by adding to the indices in the order given in :attr:`index`. For example,
  4079. if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
  4080. ``source`` is subtracted from the ``j``\ th row of :attr:`self`.
  4081. The :attr:`dim`\ th dimension of ``source`` must have the same size as the
  4082. length of :attr:`index` (which must be a vector), and all other dimensions must
  4083. match :attr:`self`, or an error will be raised.
  4084. For a 3-D tensor the output is given as::
  4085. self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0
  4086. self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1
  4087. self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2
  4088. Note:
  4089. This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
  4090. Args:
  4091. dim (int): dimension along which to index
  4092. index (Tensor): indices of ``source`` to select from,
  4093. should have dtype either `torch.int64` or `torch.int32`
  4094. source (Tensor): the tensor containing values to add
  4095. Keyword args:
  4096. alpha (Number): the scalar multiplier for ``source``
  4097. Example::
  4098. >>> x = torch.ones(5, 3)
  4099. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4100. >>> index = torch.tensor([0, 4, 2])
  4101. >>> x.index_add_(0, index, t)
  4102. tensor([[ 2., 3., 4.],
  4103. [ 1., 1., 1.],
  4104. [ 8., 9., 10.],
  4105. [ 1., 1., 1.],
  4106. [ 5., 6., 7.]])
  4107. >>> x.index_add_(0, index, t, alpha=-1)
  4108. tensor([[ 1., 1., 1.],
  4109. [ 1., 1., 1.],
  4110. [ 1., 1., 1.],
  4111. [ 1., 1., 1.],
  4112. [ 1., 1., 1.]])
  4113. """
  4114. ...
  4115. @overload
  4116. def index_copy(self, dim: _int, index: Tensor, source: Tensor) -> Tensor:
  4117. r"""
  4118. index_copy(dim, index, tensor2) -> Tensor
  4119. Out-of-place version of :meth:`torch.Tensor.index_copy_`.
  4120. """
  4121. ...
  4122. @overload
  4123. def index_copy(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
  4124. r"""
  4125. index_copy(dim, index, tensor2) -> Tensor
  4126. Out-of-place version of :meth:`torch.Tensor.index_copy_`.
  4127. """
  4128. ...
  4129. @overload
  4130. def index_copy_(self, dim: _int, index: Tensor, source: Tensor) -> Tensor:
  4131. r"""
  4132. index_copy_(dim, index, tensor) -> Tensor
  4133. Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
  4134. the indices in the order given in :attr:`index`. For example, if ``dim == 0``
  4135. and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
  4136. ``j``\ th row of :attr:`self`.
  4137. The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
  4138. length of :attr:`index` (which must be a vector), and all other dimensions must
  4139. match :attr:`self`, or an error will be raised.
  4140. .. note::
  4141. If :attr:`index` contains duplicate entries, multiple elements from
  4142. :attr:`tensor` will be copied to the same index of :attr:`self`. The result
  4143. is nondeterministic since it depends on which copy occurs last.
  4144. Args:
  4145. dim (int): dimension along which to index
  4146. index (LongTensor): indices of :attr:`tensor` to select from
  4147. tensor (Tensor): the tensor containing values to copy
  4148. Example::
  4149. >>> x = torch.zeros(5, 3)
  4150. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4151. >>> index = torch.tensor([0, 4, 2])
  4152. >>> x.index_copy_(0, index, t)
  4153. tensor([[ 1., 2., 3.],
  4154. [ 0., 0., 0.],
  4155. [ 7., 8., 9.],
  4156. [ 0., 0., 0.],
  4157. [ 4., 5., 6.]])
  4158. """
  4159. ...
  4160. @overload
  4161. def index_copy_(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
  4162. r"""
  4163. index_copy_(dim, index, tensor) -> Tensor
  4164. Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
  4165. the indices in the order given in :attr:`index`. For example, if ``dim == 0``
  4166. and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
  4167. ``j``\ th row of :attr:`self`.
  4168. The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
  4169. length of :attr:`index` (which must be a vector), and all other dimensions must
  4170. match :attr:`self`, or an error will be raised.
  4171. .. note::
  4172. If :attr:`index` contains duplicate entries, multiple elements from
  4173. :attr:`tensor` will be copied to the same index of :attr:`self`. The result
  4174. is nondeterministic since it depends on which copy occurs last.
  4175. Args:
  4176. dim (int): dimension along which to index
  4177. index (LongTensor): indices of :attr:`tensor` to select from
  4178. tensor (Tensor): the tensor containing values to copy
  4179. Example::
  4180. >>> x = torch.zeros(5, 3)
  4181. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4182. >>> index = torch.tensor([0, 4, 2])
  4183. >>> x.index_copy_(0, index, t)
  4184. tensor([[ 1., 2., 3.],
  4185. [ 0., 0., 0.],
  4186. [ 7., 8., 9.],
  4187. [ 0., 0., 0.],
  4188. [ 4., 5., 6.]])
  4189. """
  4190. ...
  4191. @overload
  4192. def index_fill(self, dim: _int, index: Tensor, value: Tensor) -> Tensor:
  4193. r"""
  4194. index_fill(dim, index, value) -> Tensor
  4195. Out-of-place version of :meth:`torch.Tensor.index_fill_`.
  4196. """
  4197. ...
  4198. @overload
  4199. def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor:
  4200. r"""
  4201. index_fill(dim, index, value) -> Tensor
  4202. Out-of-place version of :meth:`torch.Tensor.index_fill_`.
  4203. """
  4204. ...
  4205. @overload
  4206. def index_fill(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
  4207. r"""
  4208. index_fill(dim, index, value) -> Tensor
  4209. Out-of-place version of :meth:`torch.Tensor.index_fill_`.
  4210. """
  4211. ...
  4212. @overload
  4213. def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
  4214. r"""
  4215. index_fill(dim, index, value) -> Tensor
  4216. Out-of-place version of :meth:`torch.Tensor.index_fill_`.
  4217. """
  4218. ...
  4219. @overload
  4220. def index_fill_(self, dim: _int, index: Tensor, value: Tensor) -> Tensor:
  4221. r"""
  4222. index_fill_(dim, index, value) -> Tensor
  4223. Fills the elements of the :attr:`self` tensor with value :attr:`value` by
  4224. selecting the indices in the order given in :attr:`index`.
  4225. Args:
  4226. dim (int): dimension along which to index
  4227. index (LongTensor): indices of :attr:`self` tensor to fill in
  4228. value (float): the value to fill with
  4229. Example::
  4230. >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4231. >>> index = torch.tensor([0, 2])
  4232. >>> x.index_fill_(1, index, -1)
  4233. tensor([[-1., 2., -1.],
  4234. [-1., 5., -1.],
  4235. [-1., 8., -1.]])
  4236. """
  4237. ...
  4238. @overload
  4239. def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor:
  4240. r"""
  4241. index_fill_(dim, index, value) -> Tensor
  4242. Fills the elements of the :attr:`self` tensor with value :attr:`value` by
  4243. selecting the indices in the order given in :attr:`index`.
  4244. Args:
  4245. dim (int): dimension along which to index
  4246. index (LongTensor): indices of :attr:`self` tensor to fill in
  4247. value (float): the value to fill with
  4248. Example::
  4249. >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4250. >>> index = torch.tensor([0, 2])
  4251. >>> x.index_fill_(1, index, -1)
  4252. tensor([[-1., 2., -1.],
  4253. [-1., 5., -1.],
  4254. [-1., 8., -1.]])
  4255. """
  4256. ...
  4257. @overload
  4258. def index_fill_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
  4259. r"""
  4260. index_fill_(dim, index, value) -> Tensor
  4261. Fills the elements of the :attr:`self` tensor with value :attr:`value` by
  4262. selecting the indices in the order given in :attr:`index`.
  4263. Args:
  4264. dim (int): dimension along which to index
  4265. index (LongTensor): indices of :attr:`self` tensor to fill in
  4266. value (float): the value to fill with
  4267. Example::
  4268. >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4269. >>> index = torch.tensor([0, 2])
  4270. >>> x.index_fill_(1, index, -1)
  4271. tensor([[-1., 2., -1.],
  4272. [-1., 5., -1.],
  4273. [-1., 8., -1.]])
  4274. """
  4275. ...
  4276. @overload
  4277. def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
  4278. r"""
  4279. index_fill_(dim, index, value) -> Tensor
  4280. Fills the elements of the :attr:`self` tensor with value :attr:`value` by
  4281. selecting the indices in the order given in :attr:`index`.
  4282. Args:
  4283. dim (int): dimension along which to index
  4284. index (LongTensor): indices of :attr:`self` tensor to fill in
  4285. value (float): the value to fill with
  4286. Example::
  4287. >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  4288. >>> index = torch.tensor([0, 2])
  4289. >>> x.index_fill_(1, index, -1)
  4290. tensor([[-1., 2., -1.],
  4291. [-1., 5., -1.],
  4292. [-1., 8., -1.]])
  4293. """
  4294. ...
  4295. def index_put(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor:
  4296. r"""
  4297. index_put(indices, values, accumulate=False) -> Tensor
Out-of-place version of :meth:`~Tensor.index_put_`.
  4299. """
  4300. ...
  4301. def index_put_(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor:
  4302. r"""
  4303. index_put_(indices, values, accumulate=False) -> Tensor
  4304. Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
  4305. the indices specified in :attr:`indices` (which is a tuple of Tensors). The
  4306. expression ``tensor.index_put_(indices, values)`` is equivalent to
  4307. ``tensor[indices] = values``. Returns :attr:`self`.
  4308. If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
  4309. :attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
  4310. contain duplicate elements.
  4311. Args:
  4312. indices (tuple of LongTensor): tensors used to index into `self`.
  4313. values (Tensor): tensor of same dtype as `self`.
  4314. accumulate (bool): whether to accumulate into self
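For instance, writing two values at positions ``(0, 1)`` and ``(2, 1)``::
>>> x = torch.zeros(3, 3)
>>> idx = (torch.tensor([0, 2]), torch.tensor([1, 1]))
>>> x.index_put_(idx, torch.tensor([5., 7.]))
tensor([[0., 5., 0.],
[0., 0., 0.],
[0., 7., 0.]])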
  4315. """
  4316. ...
  4317. def index_reduce(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ...
  4318. def index_reduce_(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor:
  4319. r"""
  4320. index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor
  4321. Accumulate the elements of ``source`` into the :attr:`self`
  4322. tensor by accumulating to the indices in the order given in :attr:`index`
  4323. using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
  4324. ``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
  4325. row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction; otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.
  4329. The :attr:`dim`\ th dimension of ``source`` must have the same size as the
  4330. length of :attr:`index` (which must be a vector), and all other dimensions must
  4331. match :attr:`self`, or an error will be raised.
  4332. For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
  4333. output is given as::
  4334. self[index[i], :, :] *= src[i, :, :] # if dim == 0
  4335. self[:, index[i], :] *= src[:, i, :] # if dim == 1
  4336. self[:, :, index[i]] *= src[:, :, i] # if dim == 2
  4337. Note:
  4338. This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
  4339. .. note::
  4340. This function only supports floating point tensors.
  4341. .. warning::
  4342. This function is in beta and may change in the near future.
  4343. Args:
  4344. dim (int): dimension along which to index
  4345. index (Tensor): indices of ``source`` to select from,
  4346. should have dtype either `torch.int64` or `torch.int32`
  4347. source (FloatTensor): the tensor containing values to accumulate
  4348. reduce (str): the reduction operation to apply
  4349. (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
  4350. Keyword args:
  4351. include_self (bool): whether the elements from the ``self`` tensor are
  4352. included in the reduction
  4353. Example::
  4354. >>> x = torch.empty(5, 3).fill_(2)
  4355. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
  4356. >>> index = torch.tensor([0, 4, 2, 0])
  4357. >>> x.index_reduce_(0, index, t, 'prod')
  4358. tensor([[20., 44., 72.],
  4359. [ 2., 2., 2.],
  4360. [14., 16., 18.],
  4361. [ 2., 2., 2.],
  4362. [ 8., 10., 12.]])
  4363. >>> x = torch.empty(5, 3).fill_(2)
  4364. >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
  4365. tensor([[10., 22., 36.],
  4366. [ 2., 2., 2.],
  4367. [ 7., 8., 9.],
  4368. [ 2., 2., 2.],
  4369. [ 4., 5., 6.]])
  4370. """
  4371. ...
  4372. @overload
  4373. def index_select(self, dim: _int, index: Tensor) -> Tensor:
  4374. r"""
  4375. index_select(dim, index) -> Tensor
  4376. See :func:`torch.index_select`
  4377. """
  4378. ...
  4379. @overload
  4380. def index_select(self, dim: Union[str, ellipsis, None], index: Tensor) -> Tensor:
  4381. r"""
  4382. index_select(dim, index) -> Tensor
  4383. See :func:`torch.index_select`
  4384. """
  4385. ...
  4386. def indices(self) -> Tensor:
  4387. r"""
  4388. indices() -> Tensor
  4389. Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
  4390. .. warning::
  4391. Throws an error if :attr:`self` is not a sparse COO tensor.
  4392. See also :meth:`Tensor.values`.
  4393. .. note::
  4394. This method can only be called on a coalesced sparse tensor. See
  4395. :meth:`Tensor.coalesce` for details.
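For instance, on a small coalesced COO tensor::
>>> s = torch.tensor([[0, 2], [3, 0]]).to_sparse().coalesce()
>>> s.indices()
tensor([[0, 1],
[1, 0]])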
  4396. """
  4397. ...
  4398. def inner(self, other: Tensor) -> Tensor:
  4399. r"""
  4400. inner(other) -> Tensor
  4401. See :func:`torch.inner`.
  4402. """
  4403. ...
  4404. def int(self) -> Tensor:
  4405. r"""
  4406. int(memory_format=torch.preserve_format) -> Tensor
  4407. ``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
  4408. Args:
  4409. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  4410. returned Tensor. Default: ``torch.preserve_format``.
  4411. """
  4412. ...
  4413. def int_repr(self) -> Tensor:
  4414. r"""
  4415. int_repr() -> Tensor
Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor with ``uint8`` as data type that stores the
underlying integer values of the given Tensor.
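For instance, with a per-tensor quantized input (scale and zero point chosen only for illustration)::
>>> q = torch.quantize_per_tensor(torch.tensor([0.0, 1.0]), scale=0.1, zero_point=10, dtype=torch.quint8)
>>> q.int_repr()
tensor([10, 20], dtype=torch.uint8)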
  4419. """
  4420. ...
  4421. def inverse(self) -> Tensor:
  4422. r"""
  4423. inverse() -> Tensor
  4424. See :func:`torch.inverse`
  4425. """
  4426. ...
  4427. def is_coalesced(self) -> _bool:
  4428. r"""
  4429. is_coalesced() -> bool
  4430. Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
  4431. <sparse-coo-docs>` that is coalesced, ``False`` otherwise.
  4432. .. warning::
  4433. Throws an error if :attr:`self` is not a sparse COO tensor.
  4434. See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
  4435. """
  4436. ...
  4437. def is_complex(self) -> _bool:
  4438. r"""
  4439. is_complex() -> bool
  4440. Returns True if the data type of :attr:`self` is a complex data type.
  4441. """
  4442. ...
  4443. def is_conj(self) -> _bool:
  4444. r"""
  4445. is_conj() -> bool
  4446. Returns True if the conjugate bit of :attr:`self` is set to true.
  4447. """
  4448. ...
  4449. def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool:
  4450. r"""
  4451. is_contiguous(memory_format=torch.contiguous_format) -> bool
  4452. Returns True if :attr:`self` tensor is contiguous in memory in the order specified
  4453. by memory format.
  4454. Args:
  4455. memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
  4456. order. Default: ``torch.contiguous_format``.
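For instance, a freshly allocated tensor is contiguous while its transposed view is not::
>>> x = torch.randn(2, 3)
>>> x.is_contiguous()
True
>>> x.t().is_contiguous()
False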
  4457. """
  4458. ...
  4459. is_cpu: _bool
  4460. r"""Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise."""
  4461. is_cuda: _bool
  4462. r"""Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise."""
  4463. def is_distributed(self) -> _bool: ...
  4464. def is_floating_point(self) -> _bool:
  4465. r"""
  4466. is_floating_point() -> bool
  4467. Returns True if the data type of :attr:`self` is a floating point data type.
  4468. """
  4469. ...
  4470. def is_inference(self) -> _bool:
  4471. r"""
  4472. is_inference() -> bool
  4473. See :func:`torch.is_inference`
  4474. """
  4475. ...
  4476. is_ipu: _bool
  4477. r"""Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise."""
  4478. is_leaf: _bool
  4479. r"""All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
  4480. For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
  4481. created by the user. This means that they are not the result of an operation and so
  4482. :attr:`grad_fn` is None.
  4483. Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
  4484. To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
  4485. Example::
  4486. >>> a = torch.rand(10, requires_grad=True)
  4487. >>> a.is_leaf
  4488. True
  4489. >>> b = torch.rand(10, requires_grad=True).cuda()
  4490. >>> b.is_leaf
  4491. False
  4492. # b was created by the operation that cast a cpu Tensor into a cuda Tensor
  4493. >>> c = torch.rand(10, requires_grad=True) + 2
  4494. >>> c.is_leaf
  4495. False
  4496. # c was created by the addition operation
  4497. >>> d = torch.rand(10).cuda()
  4498. >>> d.is_leaf
  4499. True
  4500. # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
  4501. >>> e = torch.rand(10).cuda().requires_grad_()
  4502. >>> e.is_leaf
  4503. True
  4504. # e requires gradients and has no operations creating it
  4505. >>> f = torch.rand(10, requires_grad=True, device="cuda")
  4506. >>> f.is_leaf
  4507. True
  4508. # f requires grad, has no operation creating it"""
  4509. is_maia: _bool
  4510. is_meta: _bool
  4511. r"""Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
  4512. are like normal tensors, but they carry no data."""
  4513. is_mkldnn: _bool
  4514. is_mps: _bool
  4515. r"""Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise."""
  4516. is_mtia: _bool
  4517. def is_neg(self) -> _bool:
  4518. r"""
  4519. is_neg() -> bool
  4520. Returns True if the negative bit of :attr:`self` is set to true.
  4521. """
  4522. ...
  4523. is_nested: _bool
  4524. def is_nonzero(self) -> _bool: ...
  4525. def is_pinned(self, device: Optional[Optional[DeviceLikeType]] = None) -> _bool:
  4526. r"""
Returns ``True`` if this tensor resides in pinned memory.
  4528. """
  4529. ...
  4530. is_quantized: _bool
  4531. r"""Is ``True`` if the Tensor is quantized, ``False`` otherwise."""
  4532. def is_same_size(self, other: Tensor) -> _bool: ...
  4533. def is_set_to(self, tensor: Tensor) -> _bool:
  4534. r"""
  4535. is_set_to(tensor) -> bool
  4536. Returns True if both tensors are pointing to the exact same memory (same
  4537. storage, offset, size and stride).
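For instance, after :meth:`~Tensor.set_` makes one tensor use another's storage::
>>> t1 = torch.tensor([1., 2., 3.])
>>> t2 = torch.empty(0)
>>> _ = t2.set_(t1)
>>> t2.is_set_to(t1)
True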
  4538. """
  4539. ...
  4540. def is_signed(self) -> _bool:
  4541. r"""
  4542. is_signed() -> bool
  4543. Returns True if the data type of :attr:`self` is a signed data type.
  4544. """
  4545. ...
  4546. is_sparse: _bool
  4547. r"""Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise."""
  4548. is_sparse_csr: _bool
  4549. r"""Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise."""
  4550. is_vulkan: _bool
  4551. def isclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor:
  4552. r"""
  4553. isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
  4554. See :func:`torch.isclose`
  4555. """
  4556. ...
  4557. def isfinite(self) -> Tensor:
  4558. r"""
  4559. isfinite() -> Tensor
  4560. See :func:`torch.isfinite`
  4561. """
  4562. ...
  4563. def isinf(self) -> Tensor:
  4564. r"""
  4565. isinf() -> Tensor
  4566. See :func:`torch.isinf`
  4567. """
  4568. ...
  4569. def isnan(self) -> Tensor:
  4570. r"""
  4571. isnan() -> Tensor
  4572. See :func:`torch.isnan`
  4573. """
  4574. ...
  4575. def isneginf(self) -> Tensor:
  4576. r"""
  4577. isneginf() -> Tensor
  4578. See :func:`torch.isneginf`
  4579. """
  4580. ...
  4581. def isposinf(self) -> Tensor:
  4582. r"""
  4583. isposinf() -> Tensor
  4584. See :func:`torch.isposinf`
  4585. """
  4586. ...
  4587. def isreal(self) -> Tensor:
  4588. r"""
  4589. isreal() -> Tensor
  4590. See :func:`torch.isreal`
  4591. """
  4592. ...
  4593. def istft(self, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor:
  4594. r"""
  4595. istft(n_fft, hop_length=None, win_length=None, window=None,
  4596. center=True, normalized=False, onesided=True, length=None) -> Tensor
  4597. See :func:`torch.istft`
  4598. """
  4599. ...
  4600. def item(self) -> Number:
  4601. r"""
  4602. item() -> number
  4603. Returns the value of this tensor as a standard Python number. This only works
  4604. for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
  4605. This operation is not differentiable.
  4606. Example::
  4607. >>> x = torch.tensor([1.0])
  4608. >>> x.item()
  4609. 1.0
  4610. """
  4611. ...
  4612. def kron(self, other: Tensor) -> Tensor:
  4613. r"""
  4614. kron(other) -> Tensor
  4615. See :func:`torch.kron`
  4616. """
  4617. ...
  4618. @overload
  4619. def kthvalue(self, k: _int, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.kthvalue:
  4620. r"""
  4621. kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
  4622. See :func:`torch.kthvalue`
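Example (illustrative sketch; the input ``x`` below is hypothetical)::
>>> x = torch.arange(1., 6.)
>>> x.kthvalue(4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))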
  4623. """
  4624. ...
  4625. @overload
  4626. def kthvalue(self, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.kthvalue:
  4627. r"""
  4628. kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
  4629. See :func:`torch.kthvalue`
  4630. """
  4631. ...
  4632. def lcm(self, other: Tensor) -> Tensor:
  4633. r"""
  4634. lcm(other) -> Tensor
  4635. See :func:`torch.lcm`
  4636. """
  4637. ...
  4638. def lcm_(self, other: Tensor) -> Tensor:
  4639. r"""
  4640. lcm_(other) -> Tensor
  4641. In-place version of :meth:`~Tensor.lcm`
  4642. """
  4643. ...
  4644. def ldexp(self, other: Tensor) -> Tensor:
  4645. r"""
  4646. ldexp(other) -> Tensor
  4647. See :func:`torch.ldexp`
  4648. """
  4649. ...
  4650. def ldexp_(self, other: Tensor) -> Tensor:
  4651. r"""
  4652. ldexp_(other) -> Tensor
  4653. In-place version of :meth:`~Tensor.ldexp`
  4654. """
  4655. ...
  4656. @overload
  4657. def le(self, other: Tensor) -> Tensor:
  4658. r"""
  4659. le(other) -> Tensor
  4660. See :func:`torch.le`.
  4661. """
  4662. ...
  4663. @overload
  4664. def le(self, other: Union[Number, _complex]) -> Tensor:
  4665. r"""
  4666. le(other) -> Tensor
  4667. See :func:`torch.le`.
  4668. """
  4669. ...
  4670. @overload
  4671. def le_(self, other: Tensor) -> Tensor:
  4672. r"""
  4673. le_(other) -> Tensor
  4674. In-place version of :meth:`~Tensor.le`.
  4675. """
  4676. ...
  4677. @overload
  4678. def le_(self, other: Union[Number, _complex]) -> Tensor:
  4679. r"""
  4680. le_(other) -> Tensor
  4681. In-place version of :meth:`~Tensor.le`.
  4682. """
  4683. ...
  4684. @overload
  4685. def lerp(self, end: Tensor, weight: Tensor) -> Tensor:
  4686. r"""
  4687. lerp(end, weight) -> Tensor
  4688. See :func:`torch.lerp`
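Example (illustrative sketch; hypothetical inputs, computing ``start + weight * (end - start)``)::
>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> weight = torch.full((4,), 0.5)
>>> start.lerp(end, weight)
tensor([5.5000, 6.0000, 6.5000, 7.0000])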
  4689. """
  4690. ...
  4691. @overload
  4692. def lerp(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor:
  4693. r"""
  4694. lerp(end, weight) -> Tensor
  4695. See :func:`torch.lerp`
  4696. """
  4697. ...
  4698. @overload
  4699. def lerp_(self, end: Tensor, weight: Tensor) -> Tensor:
  4700. r"""
  4701. lerp_(end, weight) -> Tensor
  4702. In-place version of :meth:`~Tensor.lerp`
  4703. """
  4704. ...
  4705. @overload
  4706. def lerp_(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor:
  4707. r"""
  4708. lerp_(end, weight) -> Tensor
  4709. In-place version of :meth:`~Tensor.lerp`
  4710. """
  4711. ...
  4712. @overload
  4713. def less(self, other: Tensor) -> Tensor:
  4714. r"""
4715. less(other) -> Tensor
  4716. See :func:`torch.less`.
  4717. """
  4718. ...
  4719. @overload
  4720. def less(self, other: Union[Number, _complex]) -> Tensor:
  4721. r"""
4722. less(other) -> Tensor
  4723. See :func:`torch.less`.
  4724. """
  4725. ...
  4726. @overload
  4727. def less_(self, other: Tensor) -> Tensor:
  4728. r"""
  4729. less_(other) -> Tensor
  4730. In-place version of :meth:`~Tensor.less`.
  4731. """
  4732. ...
  4733. @overload
  4734. def less_(self, other: Union[Number, _complex]) -> Tensor:
  4735. r"""
  4736. less_(other) -> Tensor
  4737. In-place version of :meth:`~Tensor.less`.
  4738. """
  4739. ...
  4740. @overload
  4741. def less_equal(self, other: Tensor) -> Tensor:
  4742. r"""
  4743. less_equal(other) -> Tensor
  4744. See :func:`torch.less_equal`.
  4745. """
  4746. ...
  4747. @overload
  4748. def less_equal(self, other: Union[Number, _complex]) -> Tensor:
  4749. r"""
  4750. less_equal(other) -> Tensor
  4751. See :func:`torch.less_equal`.
  4752. """
  4753. ...
  4754. @overload
  4755. def less_equal_(self, other: Tensor) -> Tensor:
  4756. r"""
  4757. less_equal_(other) -> Tensor
  4758. In-place version of :meth:`~Tensor.less_equal`.
  4759. """
  4760. ...
  4761. @overload
  4762. def less_equal_(self, other: Union[Number, _complex]) -> Tensor:
  4763. r"""
  4764. less_equal_(other) -> Tensor
  4765. In-place version of :meth:`~Tensor.less_equal`.
  4766. """
  4767. ...
  4768. def lgamma(self) -> Tensor:
  4769. r"""
  4770. lgamma() -> Tensor
  4771. See :func:`torch.lgamma`
  4772. """
  4773. ...
  4774. def lgamma_(self) -> Tensor:
  4775. r"""
  4776. lgamma_() -> Tensor
  4777. In-place version of :meth:`~Tensor.lgamma`
  4778. """
  4779. ...
  4780. def log(self) -> Tensor:
  4781. r"""
  4782. log() -> Tensor
  4783. See :func:`torch.log`
  4784. """
  4785. ...
  4786. def log10(self) -> Tensor:
  4787. r"""
  4788. log10() -> Tensor
  4789. See :func:`torch.log10`
  4790. """
  4791. ...
  4792. def log10_(self) -> Tensor:
  4793. r"""
  4794. log10_() -> Tensor
  4795. In-place version of :meth:`~Tensor.log10`
  4796. """
  4797. ...
  4798. def log1p(self) -> Tensor:
  4799. r"""
  4800. log1p() -> Tensor
  4801. See :func:`torch.log1p`
  4802. """
  4803. ...
  4804. def log1p_(self) -> Tensor:
  4805. r"""
  4806. log1p_() -> Tensor
  4807. In-place version of :meth:`~Tensor.log1p`
  4808. """
  4809. ...
  4810. def log2(self) -> Tensor:
  4811. r"""
  4812. log2() -> Tensor
  4813. See :func:`torch.log2`
  4814. """
  4815. ...
  4816. def log2_(self) -> Tensor:
  4817. r"""
  4818. log2_() -> Tensor
  4819. In-place version of :meth:`~Tensor.log2`
  4820. """
  4821. ...
  4822. def log_(self) -> Tensor:
  4823. r"""
  4824. log_() -> Tensor
  4825. In-place version of :meth:`~Tensor.log`
  4826. """
  4827. ...
  4828. def log_normal_(self, mean: _float = 1, std: _float = 2, *, generator: Optional[Generator] = None) -> Tensor:
  4829. r"""
  4830. log_normal_(mean=1, std=2, *, generator=None)
4831. Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
  4832. parameterized by the given mean :math:`\mu` and standard deviation
  4833. :math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
  4834. standard deviation of the underlying normal distribution, and not of the
  4835. returned distribution:
  4836. .. math::
  4837. f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
  4838. """
  4839. ...
  4840. @overload
  4841. def log_softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor: ...
  4842. @overload
  4843. def log_softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
  4844. def logaddexp(self, other: Tensor) -> Tensor:
  4845. r"""
  4846. logaddexp(other) -> Tensor
  4847. See :func:`torch.logaddexp`
  4848. """
  4849. ...
  4850. def logaddexp2(self, other: Tensor) -> Tensor:
  4851. r"""
  4852. logaddexp2(other) -> Tensor
  4853. See :func:`torch.logaddexp2`
  4854. """
  4855. ...
  4856. @overload
  4857. def logcumsumexp(self, dim: _int) -> Tensor:
  4858. r"""
  4859. logcumsumexp(dim) -> Tensor
  4860. See :func:`torch.logcumsumexp`
  4861. """
  4862. ...
  4863. @overload
  4864. def logcumsumexp(self, dim: Union[str, ellipsis, None]) -> Tensor:
  4865. r"""
  4866. logcumsumexp(dim) -> Tensor
  4867. See :func:`torch.logcumsumexp`
  4868. """
  4869. ...
  4870. def logdet(self) -> Tensor:
  4871. r"""
  4872. logdet() -> Tensor
  4873. See :func:`torch.logdet`
  4874. """
  4875. ...
  4876. def logical_and(self, other: Tensor) -> Tensor:
  4877. r"""
  4878. logical_and() -> Tensor
  4879. See :func:`torch.logical_and`
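Example (illustrative sketch; hypothetical boolean inputs)::
>>> a = torch.tensor([True, False, True])
>>> b = torch.tensor([True, False, False])
>>> a.logical_and(b)
tensor([ True, False, False])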
  4880. """
  4881. ...
  4882. def logical_and_(self, other: Tensor) -> Tensor:
  4883. r"""
  4884. logical_and_() -> Tensor
  4885. In-place version of :meth:`~Tensor.logical_and`
  4886. """
  4887. ...
  4888. def logical_not(self) -> Tensor:
  4889. r"""
  4890. logical_not() -> Tensor
  4891. See :func:`torch.logical_not`
  4892. """
  4893. ...
  4894. def logical_not_(self) -> Tensor:
  4895. r"""
  4896. logical_not_() -> Tensor
  4897. In-place version of :meth:`~Tensor.logical_not`
  4898. """
  4899. ...
  4900. def logical_or(self, other: Tensor) -> Tensor:
  4901. r"""
  4902. logical_or() -> Tensor
  4903. See :func:`torch.logical_or`
  4904. """
  4905. ...
  4906. def logical_or_(self, other: Tensor) -> Tensor:
  4907. r"""
  4908. logical_or_() -> Tensor
  4909. In-place version of :meth:`~Tensor.logical_or`
  4910. """
  4911. ...
  4912. def logical_xor(self, other: Tensor) -> Tensor:
  4913. r"""
  4914. logical_xor() -> Tensor
  4915. See :func:`torch.logical_xor`
  4916. """
  4917. ...
  4918. def logical_xor_(self, other: Tensor) -> Tensor:
  4919. r"""
  4920. logical_xor_() -> Tensor
  4921. In-place version of :meth:`~Tensor.logical_xor`
  4922. """
  4923. ...
  4924. def logit(self, eps: Optional[_float] = None) -> Tensor:
  4925. r"""
  4926. logit() -> Tensor
  4927. See :func:`torch.logit`
  4928. """
  4929. ...
  4930. def logit_(self, eps: Optional[_float] = None) -> Tensor:
  4931. r"""
  4932. logit_() -> Tensor
  4933. In-place version of :meth:`~Tensor.logit`
  4934. """
  4935. ...
  4936. @overload
  4937. def logsumexp(self, dim: Union[_int, _size], keepdim: _bool = False) -> Tensor:
  4938. r"""
  4939. logsumexp(dim, keepdim=False) -> Tensor
  4940. See :func:`torch.logsumexp`
  4941. """
  4942. ...
  4943. @overload
  4944. def logsumexp(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False) -> Tensor:
  4945. r"""
  4946. logsumexp(dim, keepdim=False) -> Tensor
  4947. See :func:`torch.logsumexp`
  4948. """
  4949. ...
  4950. def long(self) -> Tensor:
  4951. r"""
  4952. long(memory_format=torch.preserve_format) -> Tensor
  4953. ``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
  4954. Args:
  4955. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  4956. returned Tensor. Default: ``torch.preserve_format``.
  4957. """
  4958. ...
  4959. @overload
  4960. def lt(self, other: Tensor) -> Tensor:
  4961. r"""
  4962. lt(other) -> Tensor
  4963. See :func:`torch.lt`.
  4964. """
  4965. ...
  4966. @overload
  4967. def lt(self, other: Union[Number, _complex]) -> Tensor:
  4968. r"""
  4969. lt(other) -> Tensor
  4970. See :func:`torch.lt`.
  4971. """
  4972. ...
  4973. @overload
  4974. def lt_(self, other: Tensor) -> Tensor:
  4975. r"""
  4976. lt_(other) -> Tensor
  4977. In-place version of :meth:`~Tensor.lt`.
  4978. """
  4979. ...
  4980. @overload
  4981. def lt_(self, other: Union[Number, _complex]) -> Tensor:
  4982. r"""
  4983. lt_(other) -> Tensor
  4984. In-place version of :meth:`~Tensor.lt`.
  4985. """
  4986. ...
  4987. def lu_solve(self, LU_data: Tensor, LU_pivots: Tensor) -> Tensor:
  4988. r"""
  4989. lu_solve(LU_data, LU_pivots) -> Tensor
  4990. See :func:`torch.lu_solve`
  4991. """
  4992. ...
  4993. def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ...
  4994. def map_(self, tensor: Tensor, callable: Callable) -> Tensor:
  4995. r"""
  4996. map_(tensor, callable)
  4997. Applies :attr:`callable` for each element in :attr:`self` tensor and the given
  4998. :attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
  4999. the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
  5000. The :attr:`callable` should have the signature::
  5001. def callable(a, b) -> number
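Example (illustrative sketch; hypothetical CPU tensors, with a lambda standing in for any two-argument callable)::
>>> a = torch.tensor([1., 2., 3.])
>>> b = torch.tensor([10., 20., 30.])
>>> a.map_(b, lambda x, y: x + y)
tensor([11., 22., 33.])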
  5002. """
  5003. ...
  5004. @overload
  5005. def masked_fill(self, mask: Tensor, value: Tensor) -> Tensor:
  5006. r"""
  5007. masked_fill(mask, value) -> Tensor
  5008. Out-of-place version of :meth:`torch.Tensor.masked_fill_`
  5009. """
  5010. ...
  5011. @overload
  5012. def masked_fill(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor:
  5013. r"""
  5014. masked_fill(mask, value) -> Tensor
  5015. Out-of-place version of :meth:`torch.Tensor.masked_fill_`
  5016. """
  5017. ...
  5018. @overload
  5019. def masked_fill_(self, mask: Tensor, value: Tensor) -> Tensor:
  5020. r"""
  5021. masked_fill_(mask, value)
  5022. Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
  5023. True. The shape of :attr:`mask` must be
  5024. :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
  5025. tensor.
  5026. Args:
  5027. mask (BoolTensor): the boolean mask
  5028. value (float): the value to fill in with
  5029. """
  5030. ...
  5031. @overload
  5032. def masked_fill_(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor:
  5033. r"""
  5034. masked_fill_(mask, value)
  5035. Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
  5036. True. The shape of :attr:`mask` must be
  5037. :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
  5038. tensor.
  5039. Args:
  5040. mask (BoolTensor): the boolean mask
  5041. value (float): the value to fill in with
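Example (illustrative sketch; hypothetical inputs)::
>>> t = torch.tensor([1., 2., 3., 4.])
>>> mask = torch.tensor([True, False, True, False])
>>> t.masked_fill_(mask, 0.)
tensor([0., 2., 0., 4.])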
  5042. """
  5043. ...
  5044. def masked_scatter(self, mask: Tensor, source: Tensor) -> Tensor:
  5045. r"""
5046. masked_scatter(mask, source) -> Tensor
  5047. Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
  5048. .. note::
  5049. The inputs :attr:`self` and :attr:`mask`
  5050. :ref:`broadcast <broadcasting-semantics>`.
  5051. Example:
  5052. >>> self = torch.tensor([0, 0, 0, 0, 0])
  5053. >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
  5054. >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
  5055. >>> self.masked_scatter(mask, source)
  5056. tensor([[0, 0, 0, 0, 1],
  5057. [2, 3, 0, 4, 5]])
  5058. """
  5059. ...
  5060. def masked_scatter_(self, mask: Tensor, source: Tensor) -> Tensor:
  5061. r"""
  5062. masked_scatter_(mask, source)
  5063. Copies elements from :attr:`source` into :attr:`self` tensor at positions where
  5064. the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
  5065. starting at position 0 of :attr:`source` and continuing in order one-by-one for each
  5066. occurrence of :attr:`mask` being True.
  5067. The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
  5068. with the shape of the underlying tensor. The :attr:`source` should have at least
  5069. as many elements as the number of ones in :attr:`mask`.
  5070. Args:
  5071. mask (BoolTensor): the boolean mask
  5072. source (Tensor): the tensor to copy from
  5073. .. note::
  5074. The :attr:`mask` operates on the :attr:`self` tensor, not on the given
  5075. :attr:`source` tensor.
  5076. Example:
  5077. >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
  5078. >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
  5079. >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
  5080. >>> self.masked_scatter_(mask, source)
  5081. tensor([[0, 0, 0, 0, 1],
  5082. [2, 3, 0, 4, 5]])
  5083. """
  5084. ...
  5085. def masked_select(self, mask: Tensor) -> Tensor:
  5086. r"""
  5087. masked_select(mask) -> Tensor
  5088. See :func:`torch.masked_select`
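Example (illustrative sketch; hypothetical input, with the mask derived from a comparison)::
>>> x = torch.tensor([[0.5, -1.2], [2.0, 0.0]])
>>> x.masked_select(x > 0)
tensor([0.5000, 2.0000])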
  5089. """
  5090. ...
  5091. def matmul(self, other: Tensor) -> Tensor:
  5092. r"""
  5093. matmul(tensor2) -> Tensor
  5094. See :func:`torch.matmul`
  5095. """
  5096. ...
  5097. def matrix_exp(self) -> Tensor:
  5098. r"""
  5099. matrix_exp() -> Tensor
  5100. See :func:`torch.matrix_exp`
  5101. """
  5102. ...
  5103. def matrix_power(self, n: _int) -> Tensor:
  5104. r"""
  5105. matrix_power(n) -> Tensor
  5106. .. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
  5107. Alias for :func:`torch.linalg.matrix_power`
  5108. """
  5109. ...
  5110. @overload
  5111. def max(self) -> Tensor:
  5112. r"""
  5113. max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5114. See :func:`torch.max`
  5115. """
  5116. ...
  5117. @overload
  5118. def max(self, other: Tensor) -> Tensor:
  5119. r"""
  5120. max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5121. See :func:`torch.max`
  5122. """
  5123. ...
  5124. @overload
  5125. def max(self, dim: _int, keepdim: _bool = False) -> torch.return_types.max:
  5126. r"""
  5127. max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5128. See :func:`torch.max`
  5129. """
  5130. ...
  5131. @overload
  5132. def max(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.max:
  5133. r"""
  5134. max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5135. See :func:`torch.max`
  5136. """
  5137. ...
  5138. def maximum(self, other: Tensor) -> Tensor:
  5139. r"""
  5140. maximum(other) -> Tensor
  5141. See :func:`torch.maximum`
  5142. """
  5143. ...
  5144. @overload
  5145. def mean(self, *, dtype: Optional[_dtype] = None) -> Tensor:
  5146. r"""
  5147. mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  5148. See :func:`torch.mean`
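Example (illustrative sketch; hypothetical input)::
>>> t = torch.tensor([[1., 2.], [3., 4.]])
>>> t.mean()
tensor(2.5000)
>>> t.mean(dim=0)
tensor([2., 3.])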
  5149. """
  5150. ...
  5151. @overload
  5152. def mean(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  5153. r"""
  5154. mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  5155. See :func:`torch.mean`
  5156. """
  5157. ...
  5158. @overload
  5159. def mean(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  5160. r"""
  5161. mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  5162. See :func:`torch.mean`
  5163. """
  5164. ...
  5165. @overload
  5166. def median(self) -> Tensor:
  5167. r"""
  5168. median(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5169. See :func:`torch.median`
  5170. """
  5171. ...
  5172. @overload
  5173. def median(self, dim: _int, keepdim: _bool = False) -> torch.return_types.median:
  5174. r"""
  5175. median(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5176. See :func:`torch.median`
  5177. """
  5178. ...
  5179. @overload
  5180. def median(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.median:
  5181. r"""
  5182. median(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5183. See :func:`torch.median`
  5184. """
  5185. ...
  5186. @overload
  5187. def min(self) -> Tensor:
  5188. r"""
  5189. min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5190. See :func:`torch.min`
  5191. """
  5192. ...
  5193. @overload
  5194. def min(self, other: Tensor) -> Tensor:
  5195. r"""
  5196. min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5197. See :func:`torch.min`
  5198. """
  5199. ...
  5200. @overload
  5201. def min(self, dim: _int, keepdim: _bool = False) -> torch.return_types.min:
  5202. r"""
  5203. min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5204. See :func:`torch.min`
  5205. """
  5206. ...
  5207. @overload
  5208. def min(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.min:
  5209. r"""
  5210. min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  5211. See :func:`torch.min`
  5212. """
  5213. ...
  5214. def minimum(self, other: Tensor) -> Tensor:
  5215. r"""
  5216. minimum(other) -> Tensor
  5217. See :func:`torch.minimum`
  5218. """
  5219. ...
  5220. def mm(self, mat2: Tensor) -> Tensor:
  5221. r"""
  5222. mm(mat2) -> Tensor
  5223. See :func:`torch.mm`
  5224. """
  5225. ...
  5226. @overload
  5227. def mode(self, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.mode:
  5228. r"""
  5229. mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5230. See :func:`torch.mode`
  5231. """
  5232. ...
  5233. @overload
  5234. def mode(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.mode:
  5235. r"""
  5236. mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5237. See :func:`torch.mode`
  5238. """
  5239. ...
  5240. @overload
  5241. def moveaxis(self, source: _int, destination: _int) -> Tensor:
  5242. r"""
  5243. moveaxis(source, destination) -> Tensor
  5244. See :func:`torch.moveaxis`
  5245. """
  5246. ...
  5247. @overload
  5248. def moveaxis(self, source: _size, destination: _size) -> Tensor:
  5249. r"""
  5250. moveaxis(source, destination) -> Tensor
  5251. See :func:`torch.moveaxis`
  5252. """
  5253. ...
  5254. @overload
  5255. def movedim(self, source: _int, destination: _int) -> Tensor:
  5256. r"""
  5257. movedim(source, destination) -> Tensor
  5258. See :func:`torch.movedim`
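Example (illustrative sketch; hypothetical input, only the resulting shape is shown)::
>>> t = torch.randn(3, 4, 5)
>>> t.movedim(0, 2).shape
torch.Size([4, 5, 3])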
  5259. """
  5260. ...
  5261. @overload
  5262. def movedim(self, source: _size, destination: _size) -> Tensor:
  5263. r"""
  5264. movedim(source, destination) -> Tensor
  5265. See :func:`torch.movedim`
  5266. """
  5267. ...
  5268. def msort(self) -> Tensor:
  5269. r"""
  5270. msort() -> Tensor
  5271. See :func:`torch.msort`
  5272. """
  5273. ...
  5274. def mul(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor:
  5275. r"""
  5276. mul(value) -> Tensor
  5277. See :func:`torch.mul`.
  5278. """
  5279. ...
  5280. def mul_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat]) -> Tensor:
  5281. r"""
  5282. mul_(value) -> Tensor
  5283. In-place version of :meth:`~Tensor.mul`.
  5284. """
  5285. ...
  5286. def multinomial(self, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None) -> Tensor:
  5287. r"""
  5288. multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
  5289. See :func:`torch.multinomial`
  5290. """
  5291. ...
  5292. @overload
  5293. def multiply(self, other: Tensor) -> Tensor:
  5294. r"""
  5295. multiply(value) -> Tensor
  5296. See :func:`torch.multiply`.
  5297. """
  5298. ...
  5299. @overload
  5300. def multiply(self, other: Union[Number, _complex]) -> Tensor:
  5301. r"""
  5302. multiply(value) -> Tensor
  5303. See :func:`torch.multiply`.
  5304. """
  5305. ...
  5306. @overload
  5307. def multiply_(self, other: Tensor) -> Tensor:
  5308. r"""
  5309. multiply_(value) -> Tensor
  5310. In-place version of :meth:`~Tensor.multiply`.
  5311. """
  5312. ...
  5313. @overload
  5314. def multiply_(self, other: Union[Number, _complex]) -> Tensor:
  5315. r"""
  5316. multiply_(value) -> Tensor
  5317. In-place version of :meth:`~Tensor.multiply`.
  5318. """
  5319. ...
  5320. def mv(self, vec: Tensor) -> Tensor:
  5321. r"""
  5322. mv(vec) -> Tensor
  5323. See :func:`torch.mv`
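Example (illustrative sketch; hypothetical inputs)::
>>> mat = torch.tensor([[1., 2.], [3., 4.]])
>>> vec = torch.tensor([1., 10.])
>>> mat.mv(vec)
tensor([21., 43.])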
  5324. """
  5325. ...
  5326. def mvlgamma(self, p: _int) -> Tensor:
  5327. r"""
  5328. mvlgamma(p) -> Tensor
  5329. See :func:`torch.mvlgamma`
  5330. """
  5331. ...
  5332. def mvlgamma_(self, p: _int) -> Tensor:
  5333. r"""
  5334. mvlgamma_(p) -> Tensor
  5335. In-place version of :meth:`~Tensor.mvlgamma`
  5336. """
  5337. ...
  5338. def nan_to_num(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor:
  5339. r"""
  5340. nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
  5341. See :func:`torch.nan_to_num`.
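Example (illustrative sketch; hypothetical input — by default ``nan`` becomes zero and infinities become the dtype's extreme finite values)::
>>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
>>> x.nan_to_num()
tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])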
  5342. """
  5343. ...
  5344. def nan_to_num_(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor:
  5345. r"""
  5346. nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
  5347. In-place version of :meth:`~Tensor.nan_to_num`.
  5348. """
  5349. ...
  5350. def nanmean(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  5351. r"""
  5352. nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  5353. See :func:`torch.nanmean`
  5354. """
  5355. ...
  5356. @overload
  5357. def nanmedian(self) -> Tensor:
  5358. r"""
  5359. nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5360. See :func:`torch.nanmedian`
  5361. """
  5362. ...
  5363. @overload
  5364. def nanmedian(self, dim: _int, keepdim: _bool = False) -> torch.return_types.nanmedian:
  5365. r"""
  5366. nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5367. See :func:`torch.nanmedian`
  5368. """
  5369. ...
  5370. @overload
  5371. def nanmedian(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.nanmedian:
  5372. r"""
  5373. nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
  5374. See :func:`torch.nanmedian`
  5375. """
  5376. ...
  5377. @overload
  5378. def nanquantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
  5379. r"""
  5380. nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  5381. See :func:`torch.nanquantile`
  5382. """
  5383. ...
  5384. @overload
  5385. def nanquantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
  5386. r"""
  5387. nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  5388. See :func:`torch.nanquantile`
  5389. """
  5390. ...
  5391. def nansum(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  5392. r"""
  5393. nansum(dim=None, keepdim=False, dtype=None) -> Tensor
  5394. See :func:`torch.nansum`
  5395. """
  5396. ...
  5397. @overload
  5398. def narrow(self, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor:
  5399. r"""
  5400. narrow(dimension, start, length) -> Tensor
  5401. See :func:`torch.narrow`.
  5402. """
  5403. ...
  5404. @overload
  5405. def narrow(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
  5406. r"""
  5407. narrow(dimension, start, length) -> Tensor
  5408. See :func:`torch.narrow`.
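Example (illustrative sketch; hypothetical input)::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> x.narrow(0, 0, 2)
tensor([[1, 2, 3],
[4, 5, 6]])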
  5409. """
  5410. ...
  5411. def narrow_copy(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
  5412. r"""
  5413. narrow_copy(dimension, start, length) -> Tensor
  5414. See :func:`torch.narrow_copy`.
  5415. """
  5416. ...
  5417. def ndimension(self) -> _int:
  5418. r"""
  5419. ndimension() -> int
  5420. Alias for :meth:`~Tensor.dim()`
  5421. """
  5422. ...
  5423. @overload
  5424. def ne(self, other: Tensor) -> Tensor:
  5425. r"""
  5426. ne(other) -> Tensor
  5427. See :func:`torch.ne`.
  5428. """
  5429. ...
  5430. @overload
  5431. def ne(self, other: Union[Number, _complex]) -> Tensor:
  5432. r"""
  5433. ne(other) -> Tensor
  5434. See :func:`torch.ne`.
  5435. """
  5436. ...
  5437. @overload
  5438. def ne_(self, other: Tensor) -> Tensor:
  5439. r"""
  5440. ne_(other) -> Tensor
  5441. In-place version of :meth:`~Tensor.ne`.
  5442. """
  5443. ...
  5444. @overload
  5445. def ne_(self, other: Union[Number, _complex]) -> Tensor:
  5446. r"""
  5447. ne_(other) -> Tensor
  5448. In-place version of :meth:`~Tensor.ne`.
  5449. """
  5450. ...
  5451. def neg(self) -> Tensor:
  5452. r"""
  5453. neg() -> Tensor
  5454. See :func:`torch.neg`
  5455. """
  5456. ...
  5457. def neg_(self) -> Tensor:
  5458. r"""
  5459. neg_() -> Tensor
  5460. In-place version of :meth:`~Tensor.neg`
  5461. """
  5462. ...
  5463. def negative(self) -> Tensor:
  5464. r"""
  5465. negative() -> Tensor
  5466. See :func:`torch.negative`
  5467. """
  5468. ...
  5469. def negative_(self) -> Tensor:
  5470. r"""
  5471. negative_() -> Tensor
  5472. In-place version of :meth:`~Tensor.negative`
  5473. """
  5474. ...
  5475. def nelement(self) -> _int:
  5476. r"""
  5477. nelement() -> int
  5478. Alias for :meth:`~Tensor.numel`
  5479. """
  5480. ...
  5481. @overload
  5482. def new(cls, *args: Any, device: Optional[DeviceLikeType] = None) -> Self: ...
  5483. @overload
  5484. def new(cls, storage: Storage) -> Self: ...
  5485. @overload
  5486. def new(cls, other: Tensor) -> Self: ...
  5487. @overload
  5488. def new(cls, size: _size, *, device: Optional[DeviceLikeType] = None) -> Self: ...
  5489. @overload
  5490. def new_empty(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5491. r"""
  5492. new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5493. Returns a Tensor of size :attr:`size` filled with uninitialized data.
  5494. By default, the returned Tensor has the same :class:`torch.dtype` and
  5495. :class:`torch.device` as this tensor.
  5496. Args:
  5497. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5498. shape of the output tensor.
  5499. Keyword args:
  5500. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5501. Default: if None, same :class:`torch.dtype` as this tensor.
  5502. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5503. Default: if None, same :class:`torch.device` as this tensor.
  5504. requires_grad (bool, optional): If autograd should record operations on the
  5505. returned tensor. Default: ``False``.
  5506. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5507. Default: ``torch.strided``.
  5508. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5509. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5510. Example::
  5511. >>> tensor = torch.ones(())
  5512. >>> tensor.new_empty((2, 3))
  5513. tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
  5514. [ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
  5515. """
  5516. ...
  5517. @overload
  5518. def new_empty(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5519. r"""
  5520. new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5521. Returns a Tensor of size :attr:`size` filled with uninitialized data.
  5522. By default, the returned Tensor has the same :class:`torch.dtype` and
  5523. :class:`torch.device` as this tensor.
  5524. Args:
  5525. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5526. shape of the output tensor.
  5527. Keyword args:
  5528. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5529. Default: if None, same :class:`torch.dtype` as this tensor.
  5530. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5531. Default: if None, same :class:`torch.device` as this tensor.
  5532. requires_grad (bool, optional): If autograd should record operations on the
  5533. returned tensor. Default: ``False``.
  5534. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5535. Default: ``torch.strided``.
  5536. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5537. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5538. Example::
  5539. >>> tensor = torch.ones(())
  5540. >>> tensor.new_empty((2, 3))
  5541. tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
  5542. [ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
  5543. """
  5544. ...
  5545. def new_empty_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5546. r"""
  5547. new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5548. Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
  5549. uninitialized data. By default, the returned Tensor has the same
  5550. :class:`torch.dtype` and :class:`torch.device` as this tensor.
  5551. Args:
  5552. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5553. shape of the output tensor.
  5554. Keyword args:
  5555. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5556. Default: if None, same :class:`torch.dtype` as this tensor.
  5557. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5558. Default: if None, same :class:`torch.device` as this tensor.
  5559. requires_grad (bool, optional): If autograd should record operations on the
  5560. returned tensor. Default: ``False``.
  5561. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5562. Default: ``torch.strided``.
  5563. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5564. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5565. Example::
  5566. >>> tensor = torch.ones(())
  5567. >>> tensor.new_empty_strided((2, 3), (3, 1))
  5568. tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
  5569. [ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
  5570. """
  5571. ...
  5572. def new_full(self, size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5573. r"""
  5574. new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5575. Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
  5576. By default, the returned Tensor has the same :class:`torch.dtype` and
  5577. :class:`torch.device` as this tensor.
  5578. Args:
  5579. fill_value (scalar): the number to fill the output tensor with.
  5580. Keyword args:
  5581. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5582. Default: if None, same :class:`torch.dtype` as this tensor.
  5583. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5584. Default: if None, same :class:`torch.device` as this tensor.
  5585. requires_grad (bool, optional): If autograd should record operations on the
  5586. returned tensor. Default: ``False``.
  5587. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5588. Default: ``torch.strided``.
  5589. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5590. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5591. Example::
  5592. >>> tensor = torch.ones((2,), dtype=torch.float64)
  5593. >>> tensor.new_full((3, 4), 3.141592)
  5594. tensor([[ 3.1416, 3.1416, 3.1416, 3.1416],
  5595. [ 3.1416, 3.1416, 3.1416, 3.1416],
  5596. [ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64)
  5597. """
  5598. ...
  5599. @overload
  5600. def new_ones(self, size: _size, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
  5601. r"""
  5602. new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5603. Returns a Tensor of size :attr:`size` filled with ``1``.
  5604. By default, the returned Tensor has the same :class:`torch.dtype` and
  5605. :class:`torch.device` as this tensor.
  5606. Args:
  5607. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5608. shape of the output tensor.
  5609. Keyword args:
  5610. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5611. Default: if None, same :class:`torch.dtype` as this tensor.
  5612. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5613. Default: if None, same :class:`torch.device` as this tensor.
  5614. requires_grad (bool, optional): If autograd should record operations on the
  5615. returned tensor. Default: ``False``.
  5616. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5617. Default: ``torch.strided``.
  5618. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5619. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5620. Example::
  5621. >>> tensor = torch.tensor((), dtype=torch.int32)
  5622. >>> tensor.new_ones((2, 3))
  5623. tensor([[ 1, 1, 1],
  5624. [ 1, 1, 1]], dtype=torch.int32)
  5625. """
  5626. ...
  5627. @overload
  5628. def new_ones(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5629. r"""
  5630. new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5631. Returns a Tensor of size :attr:`size` filled with ``1``.
  5632. By default, the returned Tensor has the same :class:`torch.dtype` and
  5633. :class:`torch.device` as this tensor.
  5634. Args:
  5635. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5636. shape of the output tensor.
  5637. Keyword args:
  5638. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5639. Default: if None, same :class:`torch.dtype` as this tensor.
  5640. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5641. Default: if None, same :class:`torch.device` as this tensor.
  5642. requires_grad (bool, optional): If autograd should record operations on the
  5643. returned tensor. Default: ``False``.
  5644. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5645. Default: ``torch.strided``.
  5646. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5647. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5648. Example::
  5649. >>> tensor = torch.tensor((), dtype=torch.int32)
  5650. >>> tensor.new_ones((2, 3))
  5651. tensor([[ 1, 1, 1],
  5652. [ 1, 1, 1]], dtype=torch.int32)
  5653. """
  5654. ...
  5655. @overload
  5656. def new_ones(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5657. r"""
  5658. new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5659. Returns a Tensor of size :attr:`size` filled with ``1``.
  5660. By default, the returned Tensor has the same :class:`torch.dtype` and
  5661. :class:`torch.device` as this tensor.
  5662. Args:
  5663. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5664. shape of the output tensor.
  5665. Keyword args:
  5666. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5667. Default: if None, same :class:`torch.dtype` as this tensor.
  5668. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5669. Default: if None, same :class:`torch.device` as this tensor.
  5670. requires_grad (bool, optional): If autograd should record operations on the
  5671. returned tensor. Default: ``False``.
  5672. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5673. Default: ``torch.strided``.
  5674. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5675. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5676. Example::
  5677. >>> tensor = torch.tensor((), dtype=torch.int32)
  5678. >>> tensor.new_ones((2, 3))
  5679. tensor([[ 1, 1, 1],
  5680. [ 1, 1, 1]], dtype=torch.int32)
  5681. """
  5682. ...
  5683. def new_tensor(self, data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
  5684. r"""
  5685. new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5686. Returns a new Tensor with :attr:`data` as the tensor data.
  5687. By default, the returned Tensor has the same :class:`torch.dtype` and
  5688. :class:`torch.device` as this tensor.
  5689. .. warning::
  5690. :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
  5691. ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
  5692. or :func:`torch.Tensor.detach`.
  5693. If you have a numpy array and want to avoid a copy, use
  5694. :func:`torch.from_numpy`.
  5695. .. warning::
  5696. When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
  5697. and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
  5698. and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
  5699. The equivalents using ``clone()`` and ``detach()`` are recommended.
  5700. Args:
  5701. data (array_like): The returned Tensor copies :attr:`data`.
  5702. Keyword args:
  5703. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5704. Default: if None, same :class:`torch.dtype` as this tensor.
  5705. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5706. Default: if None, same :class:`torch.device` as this tensor.
  5707. requires_grad (bool, optional): If autograd should record operations on the
  5708. returned tensor. Default: ``False``.
  5709. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5710. Default: ``torch.strided``.
  5711. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5712. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5713. Example::
  5714. >>> tensor = torch.ones((2,), dtype=torch.int8)
  5715. >>> data = [[0, 1], [2, 3]]
  5716. >>> tensor.new_tensor(data)
  5717. tensor([[ 0, 1],
  5718. [ 2, 3]], dtype=torch.int8)
  5719. """
  5720. ...
  5721. @overload
  5722. def new_zeros(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5723. r"""
  5724. new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5725. Returns a Tensor of size :attr:`size` filled with ``0``.
  5726. By default, the returned Tensor has the same :class:`torch.dtype` and
  5727. :class:`torch.device` as this tensor.
  5728. Args:
  5729. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5730. shape of the output tensor.
  5731. Keyword args:
  5732. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5733. Default: if None, same :class:`torch.dtype` as this tensor.
  5734. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5735. Default: if None, same :class:`torch.device` as this tensor.
  5736. requires_grad (bool, optional): If autograd should record operations on the
  5737. returned tensor. Default: ``False``.
  5738. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5739. Default: ``torch.strided``.
  5740. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5741. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5742. Example::
  5743. >>> tensor = torch.tensor((), dtype=torch.float64)
  5744. >>> tensor.new_zeros((2, 3))
  5745. tensor([[ 0., 0., 0.],
  5746. [ 0., 0., 0.]], dtype=torch.float64)
  5747. """
  5748. ...
  5749. @overload
  5750. def new_zeros(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
  5751. r"""
  5752. new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
  5753. Returns a Tensor of size :attr:`size` filled with ``0``.
  5754. By default, the returned Tensor has the same :class:`torch.dtype` and
  5755. :class:`torch.device` as this tensor.
  5756. Args:
  5757. size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
  5758. shape of the output tensor.
  5759. Keyword args:
  5760. dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
  5761. Default: if None, same :class:`torch.dtype` as this tensor.
  5762. device (:class:`torch.device`, optional): the desired device of returned tensor.
  5763. Default: if None, same :class:`torch.device` as this tensor.
  5764. requires_grad (bool, optional): If autograd should record operations on the
  5765. returned tensor. Default: ``False``.
  5766. layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
  5767. Default: ``torch.strided``.
  5768. pin_memory (bool, optional): If set, returned tensor would be allocated in
  5769. the pinned memory. Works only for CPU tensors. Default: ``False``.
  5770. Example::
  5771. >>> tensor = torch.tensor((), dtype=torch.float64)
  5772. >>> tensor.new_zeros((2, 3))
  5773. tensor([[ 0., 0., 0.],
  5774. [ 0., 0., 0.]], dtype=torch.float64)
  5775. """
  5776. ...
  5777. def nextafter(self, other: Tensor) -> Tensor:
  5778. r"""
  5779. nextafter(other) -> Tensor
  5780. See :func:`torch.nextafter`
  5781. """
  5782. ...
  5783. def nextafter_(self, other: Tensor) -> Tensor:
  5784. r"""
  5785. nextafter_(other) -> Tensor
  5786. In-place version of :meth:`~Tensor.nextafter`
  5787. """
  5788. ...
  5789. @overload
  5790. def nonzero(self, *, as_tuple: Literal[False] = False) -> Tensor:
  5791. r"""
  5792. nonzero() -> LongTensor
  5793. See :func:`torch.nonzero`
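Example (illustrative sketch; hypothetical input)::
>>> t = torch.tensor([1, 0, 2, 0])
>>> t.nonzero()
tensor([[0],
[2]])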
  5794. """
  5795. ...
  5796. @overload
  5797. def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]:
  5798. r"""
  5799. nonzero() -> LongTensor
  5800. See :func:`torch.nonzero`
  5801. """
  5802. ...
  5803. def nonzero_static(self, *, size: _int, fill_value: _int = -1) -> Tensor:
  5804. r"""
  5805. nonzero_static(input, *, size, fill_value=-1) -> Tensor
  5806. Returns a 2-D tensor where each row is the index for a non-zero value.
  5807. The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.
  5808. Args:
  5809. input (Tensor): the input tensor to count non-zero elements.
  5810. Keyword args:
  5811. size (int): the size of non-zero elements expected to be included in the out
  5812. tensor. Pad the out tensor with `fill_value` if the `size` is larger
  5813. than total number of non-zero elements, truncate out tensor if `size`
  5814. is smaller. The size must be a non-negative integer.
  5815. fill_value (int): the value to fill the output tensor with when `size` is larger
  5816. than the total number of non-zero elements. Default is `-1` to represent
  5817. invalid index.
  5818. Example:
  5819. # Example 1: Padding
  5820. >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
  5821. >>> static_size = 4
5822. >>> torch.nonzero_static(input_tensor, size=static_size)
  5823. tensor([[ 0, 0],
  5824. [ 1, 0],
  5825. [ 1, 1],
  5826. [ -1, -1]], dtype=torch.int64)
  5827. # Example 2: Truncating
  5828. >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
  5829. >>> static_size = 2
5830. >>> torch.nonzero_static(input_tensor, size=static_size)
  5831. tensor([[ 0, 0],
  5832. [ 1, 0]], dtype=torch.int64)
  5833. # Example 3: 0 size
  5834. >>> input_tensor = torch.tensor([10])
  5835. >>> static_size = 0
5836. >>> torch.nonzero_static(input_tensor, size=static_size)
  5837. tensor([], size=(0, 1), dtype=torch.int64)
  5838. # Example 4: 0 rank input
  5839. >>> input_tensor = torch.tensor(10)
  5840. >>> static_size = 2
5841. >>> torch.nonzero_static(input_tensor, size=static_size)
  5842. tensor([], size=(2, 0), dtype=torch.int64)
  5843. """
  5844. ...
  5845. def normal_(self, mean: _float = 0, std: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
  5846. r"""
  5847. normal_(mean=0, std=1, *, generator=None) -> Tensor
5848. Fills :attr:`self` tensor with elements sampled from the normal distribution
  5849. parameterized by :attr:`mean` and :attr:`std`.
  5850. """
  5851. ...
  5852. @overload
  5853. def not_equal(self, other: Tensor) -> Tensor:
  5854. r"""
  5855. not_equal(other) -> Tensor
  5856. See :func:`torch.not_equal`.
  5857. """
  5858. ...
  5859. @overload
  5860. def not_equal(self, other: Union[Number, _complex]) -> Tensor:
  5861. r"""
  5862. not_equal(other) -> Tensor
  5863. See :func:`torch.not_equal`.
  5864. """
  5865. ...
  5866. @overload
  5867. def not_equal_(self, other: Tensor) -> Tensor:
  5868. r"""
  5869. not_equal_(other) -> Tensor
  5870. In-place version of :meth:`~Tensor.not_equal`.
  5871. """
  5872. ...
  5873. @overload
  5874. def not_equal_(self, other: Union[Number, _complex]) -> Tensor:
  5875. r"""
  5876. not_equal_(other) -> Tensor
  5877. In-place version of :meth:`~Tensor.not_equal`.
  5878. """
  5879. ...
  5880. def numel(self) -> _int:
  5881. r"""
  5882. numel() -> int
  5883. See :func:`torch.numel`
  5884. """
  5885. ...
  5886. def numpy(self, *, force: _bool = False) -> numpy.ndarray:
  5887. r"""
  5888. numpy(*, force=False) -> numpy.ndarray
  5889. Returns the tensor as a NumPy :class:`ndarray`.
  5890. If :attr:`force` is ``False`` (the default), the conversion
  5891. is performed only if the tensor is on the CPU, does not require grad,
  5892. does not have its conjugate bit set, and is a dtype and layout that
  5893. NumPy supports. The returned ndarray and the tensor will share their
  5894. storage, so changes to the tensor will be reflected in the ndarray
  5895. and vice versa.
  5896. If :attr:`force` is ``True`` this is equivalent to
  5897. calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
  5898. If the tensor isn't on the CPU or the conjugate or negative bit is set,
  5899. the tensor won't share its storage with the returned ndarray.
  5900. Setting :attr:`force` to ``True`` can be a useful shorthand.
  5901. Args:
  5902. force (bool): if ``True``, the ndarray may be a copy of the tensor
  5903. instead of always sharing memory, defaults to ``False``.
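Example (illustrative sketch; hypothetical CPU tensor, showing the shared storage)::
>>> t = torch.ones(3)
>>> a = t.numpy()
>>> a
array([1., 1., 1.], dtype=float32)
>>> t.add_(1)  # the ndarray shares storage, so it sees the update
tensor([2., 2., 2.])
>>> a
array([2., 2., 2.], dtype=float32)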
  5904. """
  5905. ...
  5906. def orgqr(self, input2: Tensor) -> Tensor:
  5907. r"""
  5908. orgqr(input2) -> Tensor
  5909. See :func:`torch.orgqr`
  5910. """
  5911. ...
  5912. def ormqr(self, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False) -> Tensor:
  5913. r"""
  5914. ormqr(input2, input3, left=True, transpose=False) -> Tensor
  5915. See :func:`torch.ormqr`
  5916. """
  5917. ...
  5918. def outer(self, vec2: Tensor) -> Tensor:
  5919. r"""
  5920. outer(vec2) -> Tensor
  5921. See :func:`torch.outer`.
  5922. """
  5923. ...
  5924. @overload
  5925. def permute(self, dims: _size) -> Tensor:
  5926. r"""
  5927. permute(*dims) -> Tensor
  5928. See :func:`torch.permute`
  5929. """
  5930. ...
  5931. @overload
  5932. def permute(self, *dims: _int) -> Tensor:
  5933. r"""
  5934. permute(*dims) -> Tensor
  5935. See :func:`torch.permute`
  5936. """
  5937. ...
  5938. def pin_memory(self, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor:
  5939. r"""
  5940. pin_memory() -> Tensor
  5941. Copies the tensor to pinned memory, if it's not already pinned.
  5942. """
  5943. ...
  5944. def pinverse(self, rcond: _float = 1e-15) -> Tensor:
  5945. r"""
  5946. pinverse() -> Tensor
  5947. See :func:`torch.pinverse`
  5948. """
  5949. ...
  5950. def polygamma(self, n: _int) -> Tensor:
  5951. r"""
  5952. polygamma(n) -> Tensor
  5953. See :func:`torch.polygamma`
  5954. """
  5955. ...
  5956. def polygamma_(self, n: _int) -> Tensor:
  5957. r"""
  5958. polygamma_(n) -> Tensor
  5959. In-place version of :meth:`~Tensor.polygamma`
  5960. """
  5961. ...
  5962. def positive(self) -> Tensor:
  5963. r"""
  5964. positive() -> Tensor
  5965. See :func:`torch.positive`
  5966. """
  5967. ...
  5968. @overload
  5969. def pow(self, exponent: Tensor) -> Tensor:
  5970. r"""
  5971. pow(exponent) -> Tensor
  5972. See :func:`torch.pow`
  5973. """
  5974. ...
  5975. @overload
  5976. def pow(self, exponent: Union[Number, _complex]) -> Tensor:
  5977. r"""
  5978. pow(exponent) -> Tensor
  5979. See :func:`torch.pow`
  5980. """
  5981. ...
  5982. @overload
  5983. def pow_(self, exponent: Tensor) -> Tensor:
  5984. r"""
  5985. pow_(exponent) -> Tensor
  5986. In-place version of :meth:`~Tensor.pow`
  5987. """
  5988. ...
  5989. @overload
  5990. def pow_(self, exponent: Union[Number, _complex]) -> Tensor:
  5991. r"""
  5992. pow_(exponent) -> Tensor
  5993. In-place version of :meth:`~Tensor.pow`
  5994. """
  5995. ...
  5996. def prelu(self, weight: Tensor) -> Tensor: ...
  5997. @overload
  5998. def prod(self, *, dtype: Optional[_dtype] = None) -> Tensor:
  5999. r"""
  6000. prod(dim=None, keepdim=False, dtype=None) -> Tensor
  6001. See :func:`torch.prod`
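Example (illustrative sketch; hypothetical input)::
>>> t = torch.tensor([[1., 2.], [3., 4.]])
>>> t.prod()
tensor(24.)
>>> t.prod(dim=1)
tensor([ 2., 12.])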
  6002. """
  6003. ...
  6004. @overload
  6005. def prod(self, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  6006. r"""
  6007. prod(dim=None, keepdim=False, dtype=None) -> Tensor
  6008. See :func:`torch.prod`
  6009. """
  6010. ...
  6011. @overload
  6012. def prod(self, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  6013. r"""
  6014. prod(dim=None, keepdim=False, dtype=None) -> Tensor
  6015. See :func:`torch.prod`
  6016. """
  6017. ...
  6018. def put(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor:
  6019. r"""
  6020. put(input, index, source, accumulate=False) -> Tensor
  6021. Out-of-place version of :meth:`torch.Tensor.put_`.
  6022. `input` corresponds to `self` in :meth:`torch.Tensor.put_`.
  6023. """
  6024. ...
  6025. def put_(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor:
  6026. r"""
  6027. put_(index, source, accumulate=False) -> Tensor
  6028. Copies the elements from :attr:`source` into the positions specified by
  6029. :attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
  6030. it were a 1-D tensor.
  6031. :attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
  6032. the same shape.
  6033. If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
  6034. :attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
6035. contains duplicate elements.
  6036. Args:
  6037. index (LongTensor): the indices into self
  6038. source (Tensor): the tensor containing values to copy from
  6039. accumulate (bool): whether to accumulate into self
  6040. Example::
  6041. >>> src = torch.tensor([[4, 3, 5],
  6042. ... [6, 7, 8]])
  6043. >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
  6044. tensor([[ 4, 9, 5],
  6045. [ 10, 7, 8]])
  6046. """
  6047. ...
  6048. def q_per_channel_axis(self) -> _int:
  6049. r"""
  6050. q_per_channel_axis() -> int
  6051. Given a Tensor quantized by linear (affine) per-channel quantization,
  6052. returns the index of dimension on which per-channel quantization is applied.
  6053. """
  6054. ...
  6055. def q_per_channel_scales(self) -> Tensor:
  6056. r"""
  6057. q_per_channel_scales() -> Tensor
  6058. Given a Tensor quantized by linear (affine) per-channel quantization,
  6059. returns a Tensor of scales of the underlying quantizer. It has the number of
  6060. elements that matches the corresponding dimensions (from q_per_channel_axis) of
  6061. the tensor.
  6062. """
  6063. ...
  6064. def q_per_channel_zero_points(self) -> Tensor:
  6065. r"""
  6066. q_per_channel_zero_points() -> Tensor
  6067. Given a Tensor quantized by linear (affine) per-channel quantization,
6068. returns a tensor of zero_points of the underlying quantizer. Its number of
6069. elements matches the size of the tensor's dimension given by
6070. :meth:`~Tensor.q_per_channel_axis`.
  6071. """
  6072. ...
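# Hedged usage sketch for the per-channel accessors above, assuming a tensor quantized
# with torch.quantize_per_channel (values chosen for illustration; output shown as
# typically printed).
# >>> x = torch.randn(2, 3)
# >>> q = torch.quantize_per_channel(x, scales=torch.tensor([0.1, 0.05]),
# ...                                zero_points=torch.tensor([0, 10]),
# ...                                axis=0, dtype=torch.quint8)
# >>> q.q_per_channel_axis()
# 0
# >>> q.q_per_channel_scales()
# tensor([0.1000, 0.0500], dtype=torch.float64)
# >>> q.q_per_channel_zero_points()
# tensor([ 0, 10])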
  6073. def q_scale(self) -> _float:
  6074. r"""
  6075. q_scale() -> float
6076. Given a Tensor quantized by linear (affine) quantization,
6077. returns the scale of the underlying quantizer.
  6078. """
  6079. ...
  6080. def q_zero_point(self) -> _int:
  6081. r"""
  6082. q_zero_point() -> int
6083. Given a Tensor quantized by linear (affine) quantization,
6084. returns the zero_point of the underlying quantizer.
  6085. """
  6086. ...
  6087. def qr(self, some: _bool = True) -> torch.return_types.qr:
  6088. r"""
  6089. qr(some=True) -> (Tensor, Tensor)
  6090. See :func:`torch.qr`
  6091. """
  6092. ...
  6093. def qscheme(self) -> _qscheme:
  6094. r"""
  6095. qscheme() -> torch.qscheme
  6096. Returns the quantization scheme of a given QTensor.
  6097. """
  6098. ...
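# Hedged usage sketch for q_scale / q_zero_point / qscheme, assuming a tensor quantized
# with torch.quantize_per_tensor (values chosen for illustration).
# >>> q = torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]),
# ...                               scale=0.1, zero_point=10, dtype=torch.quint8)
# >>> q.q_scale()
# 0.1
# >>> q.q_zero_point()
# 10
# >>> q.qscheme()
# torch.per_tensor_affine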
  6099. @overload
  6100. def quantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
  6101. r"""
  6102. quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  6103. See :func:`torch.quantile`
  6104. """
  6105. ...
  6106. @overload
  6107. def quantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
  6108. r"""
  6109. quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  6110. See :func:`torch.quantile`
  6111. """
  6112. ...
  6113. def rad2deg(self) -> Tensor:
  6114. r"""
  6115. rad2deg() -> Tensor
  6116. See :func:`torch.rad2deg`
  6117. """
  6118. ...
  6119. def rad2deg_(self) -> Tensor:
  6120. r"""
  6121. rad2deg_() -> Tensor
  6122. In-place version of :meth:`~Tensor.rad2deg`
  6123. """
  6124. ...
  6125. @overload
  6126. def random_(self, *, generator: Optional[Generator] = None) -> Tensor:
  6127. r"""
  6128. random_(from=0, to=None, *, generator=None) -> Tensor
  6129. Fills :attr:`self` tensor with numbers sampled from the discrete uniform
  6130. distribution over ``[from, to - 1]``. If not specified, the values are usually
  6131. only bounded by :attr:`self` tensor's data type. However, for floating point
6132. types, if unspecified, the range will be ``[0, 2^mantissa]`` to ensure that every
  6133. value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
  6134. will be uniform in ``[0, 2^53]``.
  6135. """
  6136. ...
  6137. @overload
  6138. def random_(self, from_: _int, to: Optional[_int], *, generator: Optional[Generator] = None) -> Tensor:
  6139. r"""
  6140. random_(from=0, to=None, *, generator=None) -> Tensor
  6141. Fills :attr:`self` tensor with numbers sampled from the discrete uniform
  6142. distribution over ``[from, to - 1]``. If not specified, the values are usually
  6143. only bounded by :attr:`self` tensor's data type. However, for floating point
6144. types, if unspecified, the range will be ``[0, 2^mantissa]`` to ensure that every
  6145. value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
  6146. will be uniform in ``[0, 2^53]``.
  6147. """
  6148. ...
  6149. @overload
  6150. def random_(self, to: _int, *, generator: Optional[Generator] = None) -> Tensor:
  6151. r"""
  6152. random_(from=0, to=None, *, generator=None) -> Tensor
  6153. Fills :attr:`self` tensor with numbers sampled from the discrete uniform
  6154. distribution over ``[from, to - 1]``. If not specified, the values are usually
  6155. only bounded by :attr:`self` tensor's data type. However, for floating point
6156. types, if unspecified, the range will be ``[0, 2^mantissa]`` to ensure that every
  6157. value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
  6158. will be uniform in ``[0, 2^53]``.
  6159. """
  6160. ...
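# Hedged sketch of the random_ overloads documented above (the drawn values vary; only
# the ranges follow from the docstring).
# >>> t = torch.empty(4, dtype=torch.int64)
# >>> t.random_(0, 10)           # uniform over [0, 9]
# >>> t.random_(5)               # uniform over [0, 4]
# >>> torch.empty(4).random_()   # float32: bounded by [0, 2**24] (24 mantissa bits)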
  6161. def ravel(self) -> Tensor:
  6162. r"""
  6163. ravel() -> Tensor
6164. See :func:`torch.ravel`
  6165. """
  6166. ...
  6167. def reciprocal(self) -> Tensor:
  6168. r"""
  6169. reciprocal() -> Tensor
  6170. See :func:`torch.reciprocal`
  6171. """
  6172. ...
  6173. def reciprocal_(self) -> Tensor:
  6174. r"""
  6175. reciprocal_() -> Tensor
  6176. In-place version of :meth:`~Tensor.reciprocal`
  6177. """
  6178. ...
  6179. def record_stream(self, s: Stream) -> None:
  6180. r"""
  6181. record_stream(stream)
  6182. Marks the tensor as having been used by this stream. When the tensor
  6183. is deallocated, ensure the tensor memory is not reused for another tensor
  6184. until all work queued on :attr:`stream` at the time of deallocation is
  6185. complete.
  6186. .. note::
  6187. The caching allocator is aware of only the stream where a tensor was
6188. allocated. Because of this, it already correctly manages the life
6189. cycle of tensors that are used on only one stream. But if a tensor is used on a stream
  6190. different from the stream of origin, the allocator might reuse the memory
  6191. unexpectedly. Calling this method lets the allocator know which streams
  6192. have used the tensor.
  6193. .. warning::
  6194. This method is most suitable for use cases where you are providing a
  6195. function that created a tensor on a side stream, and want users to be able
  6196. to make use of the tensor without having to think carefully about stream
6197. safety when making use of it. These safety guarantees come at some
  6198. performance and predictability cost (analogous to the tradeoff between GC
  6199. and manual memory management), so if you are in a situation where
  6200. you manage the full lifetime of your tensors, you may consider instead
  6201. manually managing CUDA events so that calling this method is not necessary.
  6202. In particular, when you call this method, on later allocations the
  6203. allocator will poll the recorded stream to see if all operations have
  6204. completed yet; you can potentially race with side stream computation and
  6205. non-deterministically reuse or fail to reuse memory for an allocation.
  6206. You can safely use tensors allocated on side streams without
  6207. :meth:`~Tensor.record_stream`; you must manually ensure that
  6208. any non-creation stream uses of a tensor are synced back to the creation
  6209. stream before you deallocate the tensor. As the CUDA caching allocator
  6210. guarantees that the memory will only be reused with the same creation stream,
  6211. this is sufficient to ensure that writes to future reallocations of the
  6212. memory will be delayed until non-creation stream uses are done.
  6213. (Counterintuitively, you may observe that on the CPU side we have already
  6214. reallocated the tensor, even though CUDA kernels on the old tensor are
  6215. still in progress. This is fine, because CUDA operations on the new
  6216. tensor will appropriately wait for the old operations to complete, as they
  6217. are all on the same stream.)
  6218. Concretely, this looks like this::
  6219. with torch.cuda.stream(s0):
  6220. x = torch.zeros(N)
  6221. s1.wait_stream(s0)
  6222. with torch.cuda.stream(s1):
  6223. y = some_comm_op(x)
  6224. ... some compute on s0 ...
  6225. # synchronize creation stream s0 to side stream s1
  6226. # before deallocating x
  6227. s0.wait_stream(s1)
  6228. del x
  6229. Note that some discretion is required when deciding when to perform
  6230. ``s0.wait_stream(s1)``. In particular, if we were to wait immediately
  6231. after ``some_comm_op``, there wouldn't be any point in having the side
6232. stream; it would be equivalent to having run ``some_comm_op`` on ``s0``.
  6233. Instead, the synchronization must be placed at some appropriate, later
  6234. point in time where you expect the side stream ``s1`` to have finished
  6235. work. This location is typically identified via profiling, e.g., using
6236. Chrome traces produced by
6237. :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you
6238. place the wait too early, work on ``s0`` will block until ``s1`` has finished,
  6239. preventing further overlapping of communication and computation. If you
  6240. place the wait too late, you will use more memory than is strictly
6241. necessary (as you are keeping ``x`` live for longer). For a concrete
  6242. example of how this guidance can be applied in practice, see this post:
  6243. `FSDP and CUDACachingAllocator
  6244. <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
  6245. """
  6246. ...
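# Hedged sketch (requires CUDA) of the pattern described above: a tensor produced on a
# side stream is consumed on the default stream, which is recorded so the caching
# allocator does not reuse the memory too early.
# >>> s = torch.cuda.Stream()
# >>> with torch.cuda.stream(s):
# ...     x = torch.full((1024,), 1.0, device="cuda")
# >>> torch.cuda.current_stream().wait_stream(s)    # make x ready on the default stream
# >>> x.record_stream(torch.cuda.current_stream())  # tell the allocator this stream uses x
# >>> y = x * 2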
  6247. def refine_names(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
  6248. def relu(self) -> Tensor: ...
  6249. def relu_(self) -> Tensor: ...
  6250. @overload
  6251. def remainder(self, other: Tensor) -> Tensor:
  6252. r"""
  6253. remainder(divisor) -> Tensor
  6254. See :func:`torch.remainder`
  6255. """
  6256. ...
  6257. @overload
  6258. def remainder(self, other: Union[Number, _complex]) -> Tensor:
  6259. r"""
  6260. remainder(divisor) -> Tensor
  6261. See :func:`torch.remainder`
  6262. """
  6263. ...
  6264. @overload
  6265. def remainder_(self, other: Tensor) -> Tensor:
  6266. r"""
  6267. remainder_(divisor) -> Tensor
  6268. In-place version of :meth:`~Tensor.remainder`
  6269. """
  6270. ...
  6271. @overload
  6272. def remainder_(self, other: Union[Number, _complex]) -> Tensor:
  6273. r"""
  6274. remainder_(divisor) -> Tensor
  6275. In-place version of :meth:`~Tensor.remainder`
  6276. """
  6277. ...
  6278. def rename(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ...
  6279. def rename_(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ...
  6280. def renorm(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor:
  6281. r"""
  6282. renorm(p, dim, maxnorm) -> Tensor
  6283. See :func:`torch.renorm`
  6284. """
  6285. ...
  6286. def renorm_(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor:
  6287. r"""
  6288. renorm_(p, dim, maxnorm) -> Tensor
  6289. In-place version of :meth:`~Tensor.renorm`
  6290. """
  6291. ...
  6292. @overload
  6293. def repeat(self, repeats: Sequence[Union[_int, SymInt]]) -> Tensor:
  6294. r"""
  6295. repeat(*sizes) -> Tensor
  6296. Repeats this tensor along the specified dimensions.
  6297. Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
  6298. .. warning::
  6299. :meth:`~Tensor.repeat` behaves differently from
  6300. `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
  6301. but is more similar to
  6302. `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
  6303. For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
  6304. Args:
  6305. sizes (torch.Size or int...): The number of times to repeat this tensor along each
  6306. dimension
  6307. Example::
  6308. >>> x = torch.tensor([1, 2, 3])
  6309. >>> x.repeat(4, 2)
  6310. tensor([[ 1, 2, 3, 1, 2, 3],
  6311. [ 1, 2, 3, 1, 2, 3],
  6312. [ 1, 2, 3, 1, 2, 3],
  6313. [ 1, 2, 3, 1, 2, 3]])
  6314. >>> x.repeat(4, 2, 1).size()
  6315. torch.Size([4, 2, 3])
  6316. """
  6317. ...
  6318. @overload
  6319. def repeat(self, *repeats: _int) -> Tensor:
  6320. r"""
  6321. repeat(*sizes) -> Tensor
  6322. Repeats this tensor along the specified dimensions.
  6323. Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
  6324. .. warning::
  6325. :meth:`~Tensor.repeat` behaves differently from
  6326. `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
  6327. but is more similar to
  6328. `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
  6329. For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
  6330. Args:
  6331. sizes (torch.Size or int...): The number of times to repeat this tensor along each
  6332. dimension
  6333. Example::
  6334. >>> x = torch.tensor([1, 2, 3])
  6335. >>> x.repeat(4, 2)
  6336. tensor([[ 1, 2, 3, 1, 2, 3],
  6337. [ 1, 2, 3, 1, 2, 3],
  6338. [ 1, 2, 3, 1, 2, 3],
  6339. [ 1, 2, 3, 1, 2, 3]])
  6340. >>> x.repeat(4, 2, 1).size()
  6341. torch.Size([4, 2, 3])
  6342. """
  6343. ...
  6344. @overload
  6345. def repeat_interleave(self, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
  6346. r"""
  6347. repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
  6348. See :func:`torch.repeat_interleave`.
  6349. """
  6350. ...
  6351. @overload
  6352. def repeat_interleave(self, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
  6353. r"""
  6354. repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
  6355. See :func:`torch.repeat_interleave`.
  6356. """
  6357. ...
  6358. def requires_grad_(self, mode: _bool = True) -> Tensor:
  6359. r"""
  6360. requires_grad_(requires_grad=True) -> Tensor
  6361. Change if autograd should record operations on this tensor: sets this tensor's
  6362. :attr:`requires_grad` attribute in-place. Returns this tensor.
  6363. :func:`requires_grad_`'s main use case is to tell autograd to begin recording
  6364. operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
  6365. (because it was obtained through a DataLoader, or required preprocessing or
  6366. initialization), ``tensor.requires_grad_()`` makes it so that autograd will
  6367. begin to record operations on ``tensor``.
  6368. Args:
  6369. requires_grad (bool): If autograd should record operations on this tensor.
  6370. Default: ``True``.
  6371. Example::
  6372. >>> # Let's say we want to preprocess some saved weights and use
  6373. >>> # the result as new weights.
  6374. >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
  6375. >>> loaded_weights = torch.tensor(saved_weights)
  6376. >>> weights = preprocess(loaded_weights) # some function
  6377. >>> weights
  6378. tensor([-0.5503, 0.4926, -2.1158, -0.8303])
  6379. >>> # Now, start to record operations done to weights
  6380. >>> weights.requires_grad_()
  6381. >>> out = weights.pow(2).sum()
  6382. >>> out.backward()
  6383. >>> weights.grad
  6384. tensor([-1.1007, 0.9853, -4.2316, -1.6606])
  6385. """
  6386. ...
  6387. @overload
  6388. def reshape(self, shape: Sequence[Union[_int, SymInt]]) -> Tensor:
  6389. r"""
  6390. reshape(*shape) -> Tensor
  6391. Returns a tensor with the same data and number of elements as :attr:`self`
  6392. but with the specified shape. This method returns a view if :attr:`shape` is
  6393. compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
  6394. possible to return a view.
  6395. See :func:`torch.reshape`
  6396. Args:
  6397. shape (tuple of ints or int...): the desired shape
  6398. """
  6399. ...
  6400. @overload
  6401. def reshape(self, *shape: _int) -> Tensor:
  6402. r"""
  6403. reshape(*shape) -> Tensor
  6404. Returns a tensor with the same data and number of elements as :attr:`self`
  6405. but with the specified shape. This method returns a view if :attr:`shape` is
  6406. compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
  6407. possible to return a view.
  6408. See :func:`torch.reshape`
  6409. Args:
  6410. shape (tuple of ints or int...): the desired shape
  6411. """
  6412. ...
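# Hedged sketch of the behaviour described above: for a contiguous input, reshape
# typically returns a view (the data_ptr comparison is only for illustration).
# >>> a = torch.arange(6)
# >>> b = a.reshape(2, 3)
# >>> b.shape
# torch.Size([2, 3])
# >>> b.data_ptr() == a.data_ptr()   # same underlying memory for this contiguous input
# True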
  6413. def reshape_as(self, other: Tensor) -> Tensor:
  6414. r"""
  6415. reshape_as(other) -> Tensor
6416. Returns this tensor with the same shape as :attr:`other`.
6417. ``self.reshape_as(other)`` is equivalent to ``self.reshape(other.size())``.
6418. This method returns a view if ``other.size()`` is compatible with the current
  6419. shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
  6420. Please see :meth:`reshape` for more information about ``reshape``.
  6421. Args:
  6422. other (:class:`torch.Tensor`): The result tensor has the same shape
  6423. as :attr:`other`.
  6424. """
  6425. ...
  6426. @overload
  6427. def resize_(self, size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None) -> Tensor:
  6428. r"""
  6429. resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
  6430. Resizes :attr:`self` tensor to the specified size. If the number of elements is
  6431. larger than the current storage size, then the underlying storage is resized
  6432. to fit the new number of elements. If the number of elements is smaller, the
  6433. underlying storage is not changed. Existing elements are preserved but any new
  6434. memory is uninitialized.
  6435. .. warning::
  6436. This is a low-level method. The storage is reinterpreted as C-contiguous,
  6437. ignoring the current strides (unless the target size equals the current
  6438. size, in which case the tensor is left unchanged). For most purposes, you
  6439. will instead want to use :meth:`~Tensor.view()`, which checks for
  6440. contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
  6441. change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
  6442. .. note::
  6443. If :func:`torch.use_deterministic_algorithms()` and
  6444. :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
  6445. ``True``, new elements are initialized to prevent nondeterministic behavior
  6446. from using the result as an input to an operation. Floating point and
  6447. complex values are set to NaN, and integer values are set to the maximum
  6448. value.
  6449. Args:
  6450. sizes (torch.Size or int...): the desired size
  6451. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
6452. Tensor. Default: ``torch.contiguous_format``. Note that the memory format of
6453. :attr:`self` is unaffected if ``self.size()`` matches ``sizes``.
  6454. Example::
  6455. >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
  6456. >>> x.resize_(2, 2)
  6457. tensor([[ 1, 2],
  6458. [ 3, 4]])
  6459. """
  6460. ...
  6461. @overload
  6462. def resize_(self, *size: _int, memory_format: Optional[memory_format] = None) -> Tensor:
  6463. r"""
  6464. resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
  6465. Resizes :attr:`self` tensor to the specified size. If the number of elements is
  6466. larger than the current storage size, then the underlying storage is resized
  6467. to fit the new number of elements. If the number of elements is smaller, the
  6468. underlying storage is not changed. Existing elements are preserved but any new
  6469. memory is uninitialized.
  6470. .. warning::
  6471. This is a low-level method. The storage is reinterpreted as C-contiguous,
  6472. ignoring the current strides (unless the target size equals the current
  6473. size, in which case the tensor is left unchanged). For most purposes, you
  6474. will instead want to use :meth:`~Tensor.view()`, which checks for
  6475. contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
  6476. change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
  6477. .. note::
  6478. If :func:`torch.use_deterministic_algorithms()` and
  6479. :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
  6480. ``True``, new elements are initialized to prevent nondeterministic behavior
  6481. from using the result as an input to an operation. Floating point and
  6482. complex values are set to NaN, and integer values are set to the maximum
  6483. value.
  6484. Args:
  6485. sizes (torch.Size or int...): the desired size
  6486. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
6487. Tensor. Default: ``torch.contiguous_format``. Note that the memory format of
6488. :attr:`self` is unaffected if ``self.size()`` matches ``sizes``.
  6489. Example::
  6490. >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
  6491. >>> x.resize_(2, 2)
  6492. tensor([[ 1, 2],
  6493. [ 3, 4]])
  6494. """
  6495. ...
  6496. def resize_as_(self, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor:
  6497. r"""
  6498. resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
  6499. Resizes the :attr:`self` tensor to be the same size as the specified
  6500. :attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
  6501. Args:
  6502. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
6503. Tensor. Default: ``torch.contiguous_format``. Note that the memory format of
6504. :attr:`self` is unaffected if ``self.size()`` matches ``tensor.size()``.
  6505. """
  6506. ...
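# Minimal sketch of resize_as_, which the docstring defines as self.resize_(tensor.size())
# (any newly exposed elements are uninitialized).
# >>> a = torch.zeros(5)
# >>> b = torch.empty(2, 3)
# >>> a.resize_as_(b).shape
# torch.Size([2, 3])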
  6507. def resize_as_sparse_(self, the_template: Tensor) -> Tensor: ...
  6508. def resolve_conj(self) -> Tensor:
  6509. r"""
  6510. resolve_conj() -> Tensor
  6511. See :func:`torch.resolve_conj`
  6512. """
  6513. ...
  6514. def resolve_neg(self) -> Tensor:
  6515. r"""
  6516. resolve_neg() -> Tensor
  6517. See :func:`torch.resolve_neg`
  6518. """
  6519. ...
  6520. def retain_grad(self) -> None:
  6521. r"""
  6522. retain_grad() -> None
6523. Enables this Tensor to have its :attr:`grad` populated during
  6524. :func:`backward`. This is a no-op for leaf tensors.
  6525. """
  6526. ...
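# Hedged sketch of retain_grad on a non-leaf tensor, following the description above
# (gradients are exact for this small example).
# >>> x = torch.tensor([1.0, 2.0], requires_grad=True)
# >>> y = x * 3            # non-leaf; its .grad is not kept by default
# >>> y.retain_grad()
# >>> y.sum().backward()
# >>> y.grad
# tensor([1., 1.])
# >>> x.grad
# tensor([3., 3.])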
  6527. def roll(self, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor:
  6528. r"""
  6529. roll(shifts, dims) -> Tensor
  6530. See :func:`torch.roll`
  6531. """
  6532. ...
6533. def rot90(self, k: _int = 1, dims: _size = (0, 1)) -> Tensor:
  6534. r"""
  6535. rot90(k, dims) -> Tensor
  6536. See :func:`torch.rot90`
  6537. """
  6538. ...
  6539. @overload
  6540. def round(self) -> Tensor:
  6541. r"""
  6542. round(decimals=0) -> Tensor
  6543. See :func:`torch.round`
  6544. """
  6545. ...
  6546. @overload
  6547. def round(self, *, decimals: _int) -> Tensor:
  6548. r"""
  6549. round(decimals=0) -> Tensor
  6550. See :func:`torch.round`
  6551. """
  6552. ...
  6553. @overload
  6554. def round_(self) -> Tensor:
  6555. r"""
  6556. round_(decimals=0) -> Tensor
  6557. In-place version of :meth:`~Tensor.round`
  6558. """
  6559. ...
  6560. @overload
  6561. def round_(self, *, decimals: _int) -> Tensor:
  6562. r"""
  6563. round_(decimals=0) -> Tensor
  6564. In-place version of :meth:`~Tensor.round`
  6565. """
  6566. ...
  6567. def row_indices(self) -> Tensor: ...
  6568. def rsqrt(self) -> Tensor:
  6569. r"""
  6570. rsqrt() -> Tensor
  6571. See :func:`torch.rsqrt`
  6572. """
  6573. ...
  6574. def rsqrt_(self) -> Tensor:
  6575. r"""
  6576. rsqrt_() -> Tensor
  6577. In-place version of :meth:`~Tensor.rsqrt`
  6578. """
  6579. ...
  6580. @overload
  6581. def scatter(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
  6582. r"""
  6583. scatter(dim, index, src) -> Tensor
  6584. Out-of-place version of :meth:`torch.Tensor.scatter_`
  6585. """
  6586. ...
  6587. @overload
  6588. def scatter(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor:
  6589. r"""
  6590. scatter(dim, index, src) -> Tensor
  6591. Out-of-place version of :meth:`torch.Tensor.scatter_`
  6592. """
  6593. ...
  6594. @overload
  6595. def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor:
  6596. r"""
  6597. scatter(dim, index, src) -> Tensor
  6598. Out-of-place version of :meth:`torch.Tensor.scatter_`
  6599. """
  6600. ...
  6601. @overload
  6602. def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
  6603. r"""
  6604. scatter(dim, index, src) -> Tensor
  6605. Out-of-place version of :meth:`torch.Tensor.scatter_`
  6606. """
  6607. ...
  6608. @overload
  6609. def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
  6610. r"""
  6611. scatter(dim, index, src) -> Tensor
  6612. Out-of-place version of :meth:`torch.Tensor.scatter_`
  6613. """
  6614. ...
  6615. @overload
  6616. def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
  6617. r"""
  6618. scatter(dim, index, src) -> Tensor
  6619. Out-of-place version of :meth:`torch.Tensor.scatter_`
  6620. """
  6621. ...
  6622. @overload
  6623. def scatter_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
  6624. r"""
  6625. scatter_(dim, index, src, *, reduce=None) -> Tensor
  6626. Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
  6627. specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
  6628. index is specified by its index in :attr:`src` for ``dimension != dim`` and by
  6629. the corresponding value in :attr:`index` for ``dimension = dim``.
  6630. For a 3-D tensor, :attr:`self` is updated as::
  6631. self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
  6632. self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
  6633. self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
6634. This is the reverse of the operation described in :meth:`~Tensor.gather`.
  6635. :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
  6636. the same number of dimensions. It is also required that
  6637. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  6638. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  6639. Note that ``index`` and ``src`` do not broadcast.
  6640. Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
  6641. between ``0`` and ``self.size(dim) - 1`` inclusive.
  6642. .. warning::
  6643. When indices are not unique, the behavior is non-deterministic (one of the
  6644. values from ``src`` will be picked arbitrarily) and the gradient will be
  6645. incorrect (it will be propagated to all locations in the source that
  6646. correspond to the same index)!
  6647. .. note::
  6648. The backward pass is implemented only for ``src.shape == index.shape``.
  6649. Additionally accepts an optional :attr:`reduce` argument that allows
6650. specification of a reduction operation, which is applied to all
6651. values from the tensor :attr:`src` as they are scattered into :attr:`self` at the indices
6652. specified in :attr:`index`. For each value in :attr:`src`, the reduction
  6653. operation is applied to an index in :attr:`self` which is specified by
  6654. its index in :attr:`src` for ``dimension != dim`` and by the corresponding
  6655. value in :attr:`index` for ``dimension = dim``.
  6656. Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
  6657. is updated as::
  6658. self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
  6659. self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
  6660. self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
  6661. Reducing with the addition operation is the same as using
  6662. :meth:`~torch.Tensor.scatter_add_`.
  6663. .. warning::
  6664. The reduce argument with Tensor ``src`` is deprecated and will be removed in
  6665. a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
  6666. instead for more reduction options.
  6667. Args:
  6668. dim (int): the axis along which to index
  6669. index (LongTensor): the indices of elements to scatter, can be either empty
  6670. or of the same dimensionality as ``src``. When empty, the operation
  6671. returns ``self`` unchanged.
  6672. src (Tensor): the source element(s) to scatter.
  6673. Keyword args:
  6674. reduce (str, optional): reduction operation to apply, can be either
  6675. ``'add'`` or ``'multiply'``.
  6676. Example::
  6677. >>> src = torch.arange(1, 11).reshape((2, 5))
  6678. >>> src
  6679. tensor([[ 1, 2, 3, 4, 5],
  6680. [ 6, 7, 8, 9, 10]])
  6681. >>> index = torch.tensor([[0, 1, 2, 0]])
  6682. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
  6683. tensor([[1, 0, 0, 4, 0],
  6684. [0, 2, 0, 0, 0],
  6685. [0, 0, 3, 0, 0]])
  6686. >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
  6687. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
  6688. tensor([[1, 2, 3, 0, 0],
  6689. [6, 7, 0, 0, 8],
  6690. [0, 0, 0, 0, 0]])
  6691. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6692. ... 1.23, reduce='multiply')
  6693. tensor([[2.0000, 2.0000, 2.4600, 2.0000],
  6694. [2.0000, 2.0000, 2.0000, 2.4600]])
  6695. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6696. ... 1.23, reduce='add')
  6697. tensor([[2.0000, 2.0000, 3.2300, 2.0000],
  6698. [2.0000, 2.0000, 2.0000, 3.2300]])
  6699. .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
  6700. :noindex:
  6701. Writes the value from :attr:`value` into :attr:`self` at the indices
  6702. specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
  6703. with the :attr:`src` tensor filled entirely with :attr:`value`.
  6704. Args:
  6705. dim (int): the axis along which to index
  6706. index (LongTensor): the indices of elements to scatter, can be either empty
  6707. or of the same dimensionality as ``src``. When empty, the operation
  6708. returns ``self`` unchanged.
  6709. value (Scalar): the value to scatter.
  6710. Keyword args:
  6711. reduce (str, optional): reduction operation to apply, can be either
  6712. ``'add'`` or ``'multiply'``.
  6713. Example::
  6714. >>> index = torch.tensor([[0, 1]])
  6715. >>> value = 2
  6716. >>> torch.zeros(3, 5).scatter_(0, index, value)
  6717. tensor([[2., 0., 0., 0., 0.],
  6718. [0., 2., 0., 0., 0.],
  6719. [0., 0., 0., 0., 0.]])
  6720. """
  6721. ...
  6722. @overload
  6723. def scatter_(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor:
  6724. r"""
  6725. scatter_(dim, index, src, *, reduce=None) -> Tensor
  6726. Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
  6727. specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
  6728. index is specified by its index in :attr:`src` for ``dimension != dim`` and by
  6729. the corresponding value in :attr:`index` for ``dimension = dim``.
  6730. For a 3-D tensor, :attr:`self` is updated as::
  6731. self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
  6732. self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
  6733. self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
6734. This is the reverse of the operation described in :meth:`~Tensor.gather`.
  6735. :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
  6736. the same number of dimensions. It is also required that
  6737. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  6738. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  6739. Note that ``index`` and ``src`` do not broadcast.
  6740. Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
  6741. between ``0`` and ``self.size(dim) - 1`` inclusive.
  6742. .. warning::
  6743. When indices are not unique, the behavior is non-deterministic (one of the
  6744. values from ``src`` will be picked arbitrarily) and the gradient will be
  6745. incorrect (it will be propagated to all locations in the source that
  6746. correspond to the same index)!
  6747. .. note::
  6748. The backward pass is implemented only for ``src.shape == index.shape``.
  6749. Additionally accepts an optional :attr:`reduce` argument that allows
6750. specification of a reduction operation, which is applied to all
6751. values from the tensor :attr:`src` as they are scattered into :attr:`self` at the indices
6752. specified in :attr:`index`. For each value in :attr:`src`, the reduction
  6753. operation is applied to an index in :attr:`self` which is specified by
  6754. its index in :attr:`src` for ``dimension != dim`` and by the corresponding
  6755. value in :attr:`index` for ``dimension = dim``.
  6756. Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
  6757. is updated as::
  6758. self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
  6759. self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
  6760. self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
  6761. Reducing with the addition operation is the same as using
  6762. :meth:`~torch.Tensor.scatter_add_`.
  6763. .. warning::
  6764. The reduce argument with Tensor ``src`` is deprecated and will be removed in
  6765. a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
  6766. instead for more reduction options.
  6767. Args:
  6768. dim (int): the axis along which to index
  6769. index (LongTensor): the indices of elements to scatter, can be either empty
  6770. or of the same dimensionality as ``src``. When empty, the operation
  6771. returns ``self`` unchanged.
  6772. src (Tensor): the source element(s) to scatter.
  6773. Keyword args:
  6774. reduce (str, optional): reduction operation to apply, can be either
  6775. ``'add'`` or ``'multiply'``.
  6776. Example::
  6777. >>> src = torch.arange(1, 11).reshape((2, 5))
  6778. >>> src
  6779. tensor([[ 1, 2, 3, 4, 5],
  6780. [ 6, 7, 8, 9, 10]])
  6781. >>> index = torch.tensor([[0, 1, 2, 0]])
  6782. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
  6783. tensor([[1, 0, 0, 4, 0],
  6784. [0, 2, 0, 0, 0],
  6785. [0, 0, 3, 0, 0]])
  6786. >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
  6787. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
  6788. tensor([[1, 2, 3, 0, 0],
  6789. [6, 7, 0, 0, 8],
  6790. [0, 0, 0, 0, 0]])
  6791. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6792. ... 1.23, reduce='multiply')
  6793. tensor([[2.0000, 2.0000, 2.4600, 2.0000],
  6794. [2.0000, 2.0000, 2.0000, 2.4600]])
  6795. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6796. ... 1.23, reduce='add')
  6797. tensor([[2.0000, 2.0000, 3.2300, 2.0000],
  6798. [2.0000, 2.0000, 2.0000, 3.2300]])
  6799. .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
  6800. :noindex:
  6801. Writes the value from :attr:`value` into :attr:`self` at the indices
  6802. specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
  6803. with the :attr:`src` tensor filled entirely with :attr:`value`.
  6804. Args:
  6805. dim (int): the axis along which to index
  6806. index (LongTensor): the indices of elements to scatter, can be either empty
  6807. or of the same dimensionality as ``src``. When empty, the operation
  6808. returns ``self`` unchanged.
  6809. value (Scalar): the value to scatter.
  6810. Keyword args:
  6811. reduce (str, optional): reduction operation to apply, can be either
  6812. ``'add'`` or ``'multiply'``.
  6813. Example::
  6814. >>> index = torch.tensor([[0, 1]])
  6815. >>> value = 2
  6816. >>> torch.zeros(3, 5).scatter_(0, index, value)
  6817. tensor([[2., 0., 0., 0., 0.],
  6818. [0., 2., 0., 0., 0.],
  6819. [0., 0., 0., 0., 0.]])
  6820. """
  6821. ...
  6822. @overload
  6823. def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor:
  6824. r"""
  6825. scatter_(dim, index, src, *, reduce=None) -> Tensor
  6826. Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
  6827. specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
  6828. index is specified by its index in :attr:`src` for ``dimension != dim`` and by
  6829. the corresponding value in :attr:`index` for ``dimension = dim``.
  6830. For a 3-D tensor, :attr:`self` is updated as::
  6831. self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
  6832. self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
  6833. self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
6834. This is the reverse of the operation described in :meth:`~Tensor.gather`.
  6835. :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
  6836. the same number of dimensions. It is also required that
  6837. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  6838. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  6839. Note that ``index`` and ``src`` do not broadcast.
  6840. Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
  6841. between ``0`` and ``self.size(dim) - 1`` inclusive.
  6842. .. warning::
  6843. When indices are not unique, the behavior is non-deterministic (one of the
  6844. values from ``src`` will be picked arbitrarily) and the gradient will be
  6845. incorrect (it will be propagated to all locations in the source that
  6846. correspond to the same index)!
  6847. .. note::
  6848. The backward pass is implemented only for ``src.shape == index.shape``.
  6849. Additionally accepts an optional :attr:`reduce` argument that allows
6850. specification of a reduction operation, which is applied to all
6851. values from the tensor :attr:`src` as they are scattered into :attr:`self` at the indices
6852. specified in :attr:`index`. For each value in :attr:`src`, the reduction
  6853. operation is applied to an index in :attr:`self` which is specified by
  6854. its index in :attr:`src` for ``dimension != dim`` and by the corresponding
  6855. value in :attr:`index` for ``dimension = dim``.
  6856. Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
  6857. is updated as::
  6858. self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
  6859. self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
  6860. self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
  6861. Reducing with the addition operation is the same as using
  6862. :meth:`~torch.Tensor.scatter_add_`.
  6863. .. warning::
  6864. The reduce argument with Tensor ``src`` is deprecated and will be removed in
  6865. a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
  6866. instead for more reduction options.
  6867. Args:
  6868. dim (int): the axis along which to index
  6869. index (LongTensor): the indices of elements to scatter, can be either empty
  6870. or of the same dimensionality as ``src``. When empty, the operation
  6871. returns ``self`` unchanged.
  6872. src (Tensor): the source element(s) to scatter.
  6873. Keyword args:
  6874. reduce (str, optional): reduction operation to apply, can be either
  6875. ``'add'`` or ``'multiply'``.
  6876. Example::
  6877. >>> src = torch.arange(1, 11).reshape((2, 5))
  6878. >>> src
  6879. tensor([[ 1, 2, 3, 4, 5],
  6880. [ 6, 7, 8, 9, 10]])
  6881. >>> index = torch.tensor([[0, 1, 2, 0]])
  6882. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
  6883. tensor([[1, 0, 0, 4, 0],
  6884. [0, 2, 0, 0, 0],
  6885. [0, 0, 3, 0, 0]])
  6886. >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
  6887. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
  6888. tensor([[1, 2, 3, 0, 0],
  6889. [6, 7, 0, 0, 8],
  6890. [0, 0, 0, 0, 0]])
  6891. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6892. ... 1.23, reduce='multiply')
  6893. tensor([[2.0000, 2.0000, 2.4600, 2.0000],
  6894. [2.0000, 2.0000, 2.0000, 2.4600]])
  6895. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6896. ... 1.23, reduce='add')
  6897. tensor([[2.0000, 2.0000, 3.2300, 2.0000],
  6898. [2.0000, 2.0000, 2.0000, 3.2300]])
  6899. .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
  6900. :noindex:
  6901. Writes the value from :attr:`value` into :attr:`self` at the indices
  6902. specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
  6903. with the :attr:`src` tensor filled entirely with :attr:`value`.
  6904. Args:
  6905. dim (int): the axis along which to index
  6906. index (LongTensor): the indices of elements to scatter, can be either empty
  6907. or of the same dimensionality as ``src``. When empty, the operation
  6908. returns ``self`` unchanged.
  6909. value (Scalar): the value to scatter.
  6910. Keyword args:
  6911. reduce (str, optional): reduction operation to apply, can be either
  6912. ``'add'`` or ``'multiply'``.
  6913. Example::
  6914. >>> index = torch.tensor([[0, 1]])
  6915. >>> value = 2
  6916. >>> torch.zeros(3, 5).scatter_(0, index, value)
  6917. tensor([[2., 0., 0., 0., 0.],
  6918. [0., 2., 0., 0., 0.],
  6919. [0., 0., 0., 0., 0.]])
  6920. """
  6921. ...
  6922. @overload
  6923. def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
  6924. r"""
  6925. scatter_(dim, index, src, *, reduce=None) -> Tensor
  6926. Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
  6927. specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
  6928. index is specified by its index in :attr:`src` for ``dimension != dim`` and by
  6929. the corresponding value in :attr:`index` for ``dimension = dim``.
  6930. For a 3-D tensor, :attr:`self` is updated as::
  6931. self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
  6932. self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
  6933. self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
6934. This is the reverse of the operation described in :meth:`~Tensor.gather`.
  6935. :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
  6936. the same number of dimensions. It is also required that
  6937. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  6938. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  6939. Note that ``index`` and ``src`` do not broadcast.
  6940. Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
  6941. between ``0`` and ``self.size(dim) - 1`` inclusive.
  6942. .. warning::
  6943. When indices are not unique, the behavior is non-deterministic (one of the
  6944. values from ``src`` will be picked arbitrarily) and the gradient will be
  6945. incorrect (it will be propagated to all locations in the source that
  6946. correspond to the same index)!
  6947. .. note::
  6948. The backward pass is implemented only for ``src.shape == index.shape``.
  6949. Additionally accepts an optional :attr:`reduce` argument that allows
6950. specification of a reduction operation, which is applied to all
6951. values from the tensor :attr:`src` as they are scattered into :attr:`self` at the indices
6952. specified in :attr:`index`. For each value in :attr:`src`, the reduction
  6953. operation is applied to an index in :attr:`self` which is specified by
  6954. its index in :attr:`src` for ``dimension != dim`` and by the corresponding
  6955. value in :attr:`index` for ``dimension = dim``.
  6956. Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
  6957. is updated as::
  6958. self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
  6959. self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
  6960. self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
  6961. Reducing with the addition operation is the same as using
  6962. :meth:`~torch.Tensor.scatter_add_`.
  6963. .. warning::
  6964. The reduce argument with Tensor ``src`` is deprecated and will be removed in
  6965. a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
  6966. instead for more reduction options.
  6967. Args:
  6968. dim (int): the axis along which to index
  6969. index (LongTensor): the indices of elements to scatter, can be either empty
  6970. or of the same dimensionality as ``src``. When empty, the operation
  6971. returns ``self`` unchanged.
  6972. src (Tensor): the source element(s) to scatter.
  6973. Keyword args:
  6974. reduce (str, optional): reduction operation to apply, can be either
  6975. ``'add'`` or ``'multiply'``.
  6976. Example::
  6977. >>> src = torch.arange(1, 11).reshape((2, 5))
  6978. >>> src
  6979. tensor([[ 1, 2, 3, 4, 5],
  6980. [ 6, 7, 8, 9, 10]])
  6981. >>> index = torch.tensor([[0, 1, 2, 0]])
  6982. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
  6983. tensor([[1, 0, 0, 4, 0],
  6984. [0, 2, 0, 0, 0],
  6985. [0, 0, 3, 0, 0]])
  6986. >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
  6987. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
  6988. tensor([[1, 2, 3, 0, 0],
  6989. [6, 7, 0, 0, 8],
  6990. [0, 0, 0, 0, 0]])
  6991. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6992. ... 1.23, reduce='multiply')
  6993. tensor([[2.0000, 2.0000, 2.4600, 2.0000],
  6994. [2.0000, 2.0000, 2.0000, 2.4600]])
  6995. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  6996. ... 1.23, reduce='add')
  6997. tensor([[2.0000, 2.0000, 3.2300, 2.0000],
  6998. [2.0000, 2.0000, 2.0000, 3.2300]])
  6999. .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
  7000. :noindex:
  7001. Writes the value from :attr:`value` into :attr:`self` at the indices
  7002. specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
  7003. with the :attr:`src` tensor filled entirely with :attr:`value`.
  7004. Args:
  7005. dim (int): the axis along which to index
  7006. index (LongTensor): the indices of elements to scatter, can be either empty
  7007. or of the same dimensionality as ``src``. When empty, the operation
  7008. returns ``self`` unchanged.
  7009. value (Scalar): the value to scatter.
  7010. Keyword args:
  7011. reduce (str, optional): reduction operation to apply, can be either
  7012. ``'add'`` or ``'multiply'``.
  7013. Example::
  7014. >>> index = torch.tensor([[0, 1]])
  7015. >>> value = 2
  7016. >>> torch.zeros(3, 5).scatter_(0, index, value)
  7017. tensor([[2., 0., 0., 0., 0.],
  7018. [0., 2., 0., 0., 0.],
  7019. [0., 0., 0., 0., 0.]])
  7020. """
  7021. ...
  7022. @overload
  7023. def scatter_add(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
  7024. r"""
  7025. scatter_add(dim, index, src) -> Tensor
  7026. Out-of-place version of :meth:`torch.Tensor.scatter_add_`
  7027. """
  7028. ...
  7029. @overload
  7030. def scatter_add(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
  7031. r"""
  7032. scatter_add(dim, index, src) -> Tensor
  7033. Out-of-place version of :meth:`torch.Tensor.scatter_add_`
  7034. """
  7035. ...
  7036. def scatter_add_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
  7037. r"""
  7038. scatter_add_(dim, index, src) -> Tensor
  7039. Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
  7040. specified in the :attr:`index` tensor in a similar fashion as
  7041. :meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
  7042. an index in :attr:`self` which is specified by its index in :attr:`src`
  7043. for ``dimension != dim`` and by the corresponding value in :attr:`index` for
  7044. ``dimension = dim``.
  7045. For a 3-D tensor, :attr:`self` is updated as::
  7046. self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
  7047. self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
  7048. self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
7049. :attr:`self`, :attr:`index` and :attr:`src` should have the same number of
  7050. dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
  7051. dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
  7052. ``d != dim``. Note that ``index`` and ``src`` do not broadcast.
  7053. Note:
  7054. This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
  7055. .. note::
  7056. The backward pass is implemented only for ``src.shape == index.shape``.
  7057. Args:
  7058. dim (int): the axis along which to index
  7059. index (LongTensor): the indices of elements to scatter and add, can be
  7060. either empty or of the same dimensionality as ``src``. When empty, the
  7061. operation returns ``self`` unchanged.
  7062. src (Tensor): the source elements to scatter and add
  7063. Example::
  7064. >>> src = torch.ones((2, 5))
  7065. >>> index = torch.tensor([[0, 1, 2, 0, 0]])
  7066. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
  7067. tensor([[1., 0., 0., 1., 1.],
  7068. [0., 1., 0., 0., 0.],
  7069. [0., 0., 1., 0., 0.]])
  7070. >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
  7071. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
  7072. tensor([[2., 0., 0., 1., 1.],
  7073. [0., 2., 0., 0., 0.],
  7074. [0., 0., 2., 1., 1.]])
  7075. """
  7076. ...
  7077. def scatter_reduce(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor:
  7078. r"""
  7079. scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor
  7080. Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
  7081. """
  7082. ...
  7083. def scatter_reduce_(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor:
  7084. r"""
  7085. scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
  7086. Reduces all values from the :attr:`src` tensor to the indices specified in
  7087. the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
  7088. defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
  7089. :obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
  7090. index in :attr:`self` which is specified by its index in :attr:`src` for
  7091. ``dimension != dim`` and by the corresponding value in :attr:`index` for
7092. ``dimension = dim``. If :obj:`include_self=True`, the values in the :attr:`self`
  7093. tensor are included in the reduction.
  7094. :attr:`self`, :attr:`index` and :attr:`src` should all have
  7095. the same number of dimensions. It is also required that
  7096. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  7097. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  7098. Note that ``index`` and ``src`` do not broadcast.
  7099. For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
  7100. output is given as::
  7101. self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
  7102. self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
  7103. self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
  7104. Note:
  7105. This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
  7106. .. note::
  7107. The backward pass is implemented only for ``src.shape == index.shape``.
  7108. .. warning::
  7109. This function is in beta and may change in the near future.
  7110. Args:
  7111. dim (int): the axis along which to index
  7112. index (LongTensor): the indices of elements to scatter and reduce.
  7113. src (Tensor): the source elements to scatter and reduce
  7114. reduce (str): the reduction operation to apply for non-unique indices
  7115. (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
  7116. include_self (bool): whether elements from the :attr:`self` tensor are
  7117. included in the reduction
  7118. Example::
  7119. >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
  7120. >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
  7121. >>> input = torch.tensor([1., 2., 3., 4.])
  7122. >>> input.scatter_reduce(0, index, src, reduce="sum")
  7123. tensor([5., 14., 8., 4.])
  7124. >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
  7125. tensor([4., 12., 5., 4.])
  7126. >>> input2 = torch.tensor([5., 4., 3., 2.])
  7127. >>> input2.scatter_reduce(0, index, src, reduce="amax")
  7128. tensor([5., 6., 5., 2.])
  7129. >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
  7130. tensor([3., 6., 5., 2.])
  7131. """
  7132. ...
  7133. @overload
  7134. def select(self, dim: _int, index: Union[_int, SymInt]) -> Tensor:
  7135. r"""
  7136. select(dim, index) -> Tensor
  7137. See :func:`torch.select`
  7138. """
  7139. ...
  7140. @overload
  7141. def select(self, dim: Union[str, ellipsis, None], index: _int) -> Tensor:
  7142. r"""
  7143. select(dim, index) -> Tensor
  7144. See :func:`torch.select`
  7145. """
  7146. ...
  7147. def select_scatter(self, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
  7148. r"""
  7149. select_scatter(src, dim, index) -> Tensor
  7150. See :func:`torch.select_scatter`
  7151. """
  7152. ...
  7153. @overload
  7154. def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage], offset: _int, size: _size, stride: _size) -> Tensor:
  7155. r"""
  7156. set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
  7157. Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
  7158. :attr:`self` tensor will share the same storage and have the same size and
  7159. strides as :attr:`source`. Changes to elements in one tensor will be reflected
  7160. in the other.
  7161. If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
  7162. storage, offset, size, and stride.
  7163. Args:
  7164. source (Tensor or Storage): the tensor or storage to use
  7165. storage_offset (int, optional): the offset in the storage
  7166. size (torch.Size, optional): the desired size. Defaults to the size of the source.
  7167. stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
  7168. """
  7169. ...
  7170. @overload
  7171. def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage]) -> Tensor:
  7172. r"""
  7173. set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
  7174. Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
  7175. :attr:`self` tensor will share the same storage and have the same size and
  7176. strides as :attr:`source`. Changes to elements in one tensor will be reflected
  7177. in the other.
  7178. If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
  7179. storage, offset, size, and stride.
  7180. Args:
  7181. source (Tensor or Storage): the tensor or storage to use
  7182. storage_offset (int, optional): the offset in the storage
  7183. size (torch.Size, optional): the desired size. Defaults to the size of the source.
  7184. stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
  7185. """
  7186. ...
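# Hedged sketch of set_ with a tensor source, per the docstring above: the target then
# shares storage, size, and strides with the source (data_ptr comparison only for
# illustration).
# >>> a = torch.arange(6.0).reshape(2, 3)
# >>> b = torch.empty(0).set_(a)
# >>> b.shape, b.data_ptr() == a.data_ptr()
# (torch.Size([2, 3]), True)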
  7187. def sgn(self) -> Tensor:
  7188. r"""
  7189. sgn() -> Tensor
  7190. See :func:`torch.sgn`
  7191. """
  7192. ...
  7193. def sgn_(self) -> Tensor:
  7194. r"""
  7195. sgn_() -> Tensor
  7196. In-place version of :meth:`~Tensor.sgn`
  7197. """
  7198. ...
  7199. def short(self) -> Tensor:
  7200. r"""
  7201. short(memory_format=torch.preserve_format) -> Tensor
  7202. ``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
  7203. Args:
  7204. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7205. returned Tensor. Default: ``torch.preserve_format``.
  7206. """
  7207. ...
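# Minimal sketch of short(), which the docstring defines as self.to(torch.int16).
# >>> torch.tensor([1, 2, 3]).short().dtype
# torch.int16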
  7208. def sigmoid(self) -> Tensor:
  7209. r"""
  7210. sigmoid() -> Tensor
  7211. See :func:`torch.sigmoid`
  7212. """
  7213. ...
  7214. def sigmoid_(self) -> Tensor:
  7215. r"""
  7216. sigmoid_() -> Tensor
  7217. In-place version of :meth:`~Tensor.sigmoid`
  7218. """
  7219. ...
  7220. def sign(self) -> Tensor:
  7221. r"""
  7222. sign() -> Tensor
  7223. See :func:`torch.sign`
  7224. """
  7225. ...
  7226. def sign_(self) -> Tensor:
  7227. r"""
  7228. sign_() -> Tensor
  7229. In-place version of :meth:`~Tensor.sign`
  7230. """
  7231. ...
  7232. def signbit(self) -> Tensor:
  7233. r"""
  7234. signbit() -> Tensor
  7235. See :func:`torch.signbit`
  7236. """
  7237. ...
  7238. def sin(self) -> Tensor:
  7239. r"""
  7240. sin() -> Tensor
  7241. See :func:`torch.sin`
  7242. """
  7243. ...
  7244. def sin_(self) -> Tensor:
  7245. r"""
  7246. sin_() -> Tensor
  7247. In-place version of :meth:`~Tensor.sin`
  7248. """
  7249. ...
  7250. def sinc(self) -> Tensor:
  7251. r"""
  7252. sinc() -> Tensor
  7253. See :func:`torch.sinc`
  7254. """
  7255. ...
  7256. def sinc_(self) -> Tensor:
  7257. r"""
  7258. sinc_() -> Tensor
  7259. In-place version of :meth:`~Tensor.sinc`
  7260. """
  7261. ...
  7262. def sinh(self) -> Tensor:
  7263. r"""
  7264. sinh() -> Tensor
  7265. See :func:`torch.sinh`
  7266. """
  7267. ...
  7268. def sinh_(self) -> Tensor:
  7269. r"""
  7270. sinh_() -> Tensor
  7271. In-place version of :meth:`~Tensor.sinh`
  7272. """
  7273. ...
  7274. @overload
  7275. def size(self, dim: None = None) -> Size:
  7276. r"""
  7277. size(dim=None) -> torch.Size or int
  7278. Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
  7279. the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
  7280. If ``dim`` is specified, returns an int holding the size of that dimension.
  7281. Args:
  7282. dim (int, optional): The dimension for which to retrieve the size.
  7283. Example::
  7284. >>> t = torch.empty(3, 4, 5)
  7285. >>> t.size()
  7286. torch.Size([3, 4, 5])
  7287. >>> t.size(dim=1)
  7288. 4
  7289. """
  7290. ...
  7291. @overload
  7292. def size(self, dim: _int) -> _int:
  7293. r"""
  7294. size(dim=None) -> torch.Size or int
  7295. Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
  7296. the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
  7297. If ``dim`` is specified, returns an int holding the size of that dimension.
  7298. Args:
  7299. dim (int, optional): The dimension for which to retrieve the size.
  7300. Example::
  7301. >>> t = torch.empty(3, 4, 5)
  7302. >>> t.size()
  7303. torch.Size([3, 4, 5])
  7304. >>> t.size(dim=1)
  7305. 4
  7306. """
  7307. ...
  7308. def slice_inverse(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ...
  7309. def slice_scatter(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor:
  7310. r"""
  7311. slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
  7312. See :func:`torch.slice_scatter`
  7313. """
  7314. ...
  7315. def slogdet(self) -> torch.return_types.slogdet:
  7316. r"""
  7317. slogdet() -> (Tensor, Tensor)
  7318. See :func:`torch.slogdet`
  7319. """
  7320. ...
  7321. def smm(self, mat2: Tensor) -> Tensor:
  7322. r"""
  7323. smm(mat) -> Tensor
  7324. See :func:`torch.smm`
  7325. """
  7326. ...
  7327. @overload
  7328. def softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor:
  7329. r"""
  7330. softmax(dim) -> Tensor
  7331. Alias for :func:`torch.nn.functional.softmax`.
  7332. """
  7333. ...
  7334. @overload
  7335. def softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
  7336. r"""
  7337. softmax(dim) -> Tensor
  7338. Alias for :func:`torch.nn.functional.softmax`.
  7339. """
  7340. ...
  7341. @overload
  7342. def sort(self, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False) -> torch.return_types.sort:
  7343. r"""
  7344. sort(dim=-1, descending=False) -> (Tensor, LongTensor)
  7345. See :func:`torch.sort`
  7346. """
  7347. ...
  7348. @overload
  7349. def sort(self, dim: _int = -1, descending: _bool = False) -> torch.return_types.sort:
  7350. r"""
  7351. sort(dim=-1, descending=False) -> (Tensor, LongTensor)
  7352. See :func:`torch.sort`
  7353. """
  7354. ...
  7355. @overload
  7356. def sort(self, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort:
  7357. r"""
  7358. sort(dim=-1, descending=False) -> (Tensor, LongTensor)
  7359. See :func:`torch.sort`
  7360. """
  7361. ...
  7362. @overload
  7363. def sort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort:
  7364. r"""
  7365. sort(dim=-1, descending=False) -> (Tensor, LongTensor)
  7366. See :func:`torch.sort`
  7367. """
  7368. ...
  7369. def sparse_dim(self) -> _int:
  7370. r"""
  7371. sparse_dim() -> int
  7372. Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
  7373. .. note::
  7374. Returns ``0`` if :attr:`self` is not a sparse tensor.
  7375. See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
  7376. """
  7377. ...
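# Illustrative hybrid COO example for the sparse/dense split described above
# (index and value contents are arbitrary):
#   >>> i = torch.tensor([[0, 2]])
#   >>> v = torch.ones(2, 3)
#   >>> s = torch.sparse_coo_tensor(i, v, (4, 3))
#   >>> s.sparse_dim(), s.dense_dim()
#   (1, 1)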
  7378. def sparse_mask(self, mask: Tensor) -> Tensor:
  7379. r"""
  7380. sparse_mask(mask) -> Tensor
  7381. Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
  7382. strided tensor :attr:`self` filtered by the indices of the sparse
  7383. tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
  7384. ignored. :attr:`self` and :attr:`mask` tensors must have the same
  7385. shape.
  7386. .. note::
  7387. The returned sparse tensor might contain duplicate values if :attr:`mask`
  7388. is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
  7389. if such behavior is not desired.
  7390. .. note::
  7391. The returned sparse tensor has the same indices as the sparse tensor
  7392. :attr:`mask`, even when the corresponding values in :attr:`self` are
  7393. zeros.
  7394. Args:
  7395. mask (Tensor): a sparse tensor whose indices are used as a filter
  7396. Example::
  7397. >>> nse = 5
  7398. >>> dims = (5, 5, 2, 2)
  7399. >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
  7400. ... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
  7401. >>> V = torch.randn(nse, dims[2], dims[3])
  7402. >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
  7403. >>> D = torch.randn(dims)
  7404. >>> D.sparse_mask(S)
  7405. tensor(indices=tensor([[0, 0, 0, 2],
  7406. [0, 1, 4, 3]]),
  7407. values=tensor([[[ 1.6550, 0.2397],
  7408. [-0.1611, -0.0779]],
  7409. [[ 0.2326, -1.0558],
  7410. [ 1.4711, 1.9678]],
  7411. [[-0.5138, -0.0411],
  7412. [ 1.9417, 0.5158]],
  7413. [[ 0.0793, 0.0036],
  7414. [-0.2569, -0.1055]]]),
  7415. size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
  7416. """
  7417. ...
  7418. def sparse_resize_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor:
  7419. r"""
  7420. sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
  7421. Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
  7422. size and the number of sparse and dense dimensions.
  7423. .. note::
  7424. If the number of specified elements in :attr:`self` is zero, then
  7425. :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
  7426. size and positive integers such that ``len(size) == sparse_dim +
  7427. dense_dim``.
  7428. If :attr:`self` specifies one or more elements, however, then each
  7429. dimension in :attr:`size` must not be smaller than the corresponding
  7430. dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
  7431. of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
  7432. equal the number of dense dimensions in :attr:`self`.
  7433. .. warning::
  7434. Throws an error if :attr:`self` is not a sparse tensor.
  7435. Args:
  7436. size (torch.Size): the desired size. If :attr:`self` is non-empty
  7437. sparse tensor, the desired size cannot be smaller than the
  7438. original size.
  7439. sparse_dim (int): the number of sparse dimensions
  7440. dense_dim (int): the number of dense dimensions
  7441. """
  7442. ...
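# A sketch of resizing an empty sparse tensor, where any combination with
# ``len(size) == sparse_dim + dense_dim`` is accepted per the note above:
#   >>> s = torch.sparse_coo_tensor(size=(2, 3))
#   >>> s.sparse_resize_((4, 5), 2, 0).shape
#   torch.Size([4, 5])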
  7443. def sparse_resize_and_clear_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor:
  7444. r"""
  7445. sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
  7446. Removes all specified elements from a :ref:`sparse tensor
  7447. <sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
  7448. size and the number of sparse and dense dimensions.
7449. .. warning::
  7450. Throws an error if :attr:`self` is not a sparse tensor.
  7451. Args:
  7452. size (torch.Size): the desired size.
  7453. sparse_dim (int): the number of sparse dimensions
  7454. dense_dim (int): the number of dense dimensions
  7455. """
  7456. ...
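# Unlike sparse_resize_, this variant also drops every specified element; a
# minimal sketch:
#   >>> s = torch.tensor([[0., 1.], [2., 0.]]).to_sparse()
#   >>> s.sparse_resize_and_clear_((3, 3), 2, 0)._nnz()
#   0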
  7457. @overload
  7458. def split(self, split_size: _int, dim: _int = 0) -> Sequence[Tensor]: ...
  7459. @overload
  7460. def split(self, split_size: Tuple[_int, ...], dim: _int = 0) -> Sequence[Tensor]: ...
  7461. def split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
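# The split overloads above carry no docstring here; a minimal sketch of the
# two calling conventions (a single chunk size vs. explicit per-chunk sizes):
#   >>> x = torch.arange(6)
#   >>> [t.tolist() for t in x.split(4)]
#   [[0, 1, 2, 3], [4, 5]]
#   >>> [t.tolist() for t in x.split_with_sizes([1, 2, 3])]
#   [[0], [1, 2], [3, 4, 5]]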
  7462. def sqrt(self) -> Tensor:
  7463. r"""
  7464. sqrt() -> Tensor
  7465. See :func:`torch.sqrt`
  7466. """
  7467. ...
  7468. def sqrt_(self) -> Tensor:
  7469. r"""
  7470. sqrt_() -> Tensor
  7471. In-place version of :meth:`~Tensor.sqrt`
  7472. """
  7473. ...
  7474. def square(self) -> Tensor:
  7475. r"""
  7476. square() -> Tensor
  7477. See :func:`torch.square`
  7478. """
  7479. ...
  7480. def square_(self) -> Tensor:
  7481. r"""
  7482. square_() -> Tensor
  7483. In-place version of :meth:`~Tensor.square`
  7484. """
  7485. ...
  7486. @overload
  7487. def squeeze(self) -> Tensor:
  7488. r"""
  7489. squeeze(dim=None) -> Tensor
  7490. See :func:`torch.squeeze`
  7491. """
  7492. ...
  7493. @overload
  7494. def squeeze(self, dim: _int) -> Tensor:
  7495. r"""
  7496. squeeze(dim=None) -> Tensor
  7497. See :func:`torch.squeeze`
  7498. """
  7499. ...
  7500. @overload
  7501. def squeeze(self, dim: _size) -> Tensor:
  7502. r"""
  7503. squeeze(dim=None) -> Tensor
  7504. See :func:`torch.squeeze`
  7505. """
  7506. ...
  7507. @overload
  7508. def squeeze(self, *dim: _int) -> Tensor:
  7509. r"""
  7510. squeeze(dim=None) -> Tensor
  7511. See :func:`torch.squeeze`
  7512. """
  7513. ...
  7514. @overload
  7515. def squeeze(self, dim: Union[str, ellipsis, None]) -> Tensor:
  7516. r"""
  7517. squeeze(dim=None) -> Tensor
  7518. See :func:`torch.squeeze`
  7519. """
  7520. ...
  7521. @overload
  7522. def squeeze_(self) -> Tensor:
  7523. r"""
  7524. squeeze_(dim=None) -> Tensor
  7525. In-place version of :meth:`~Tensor.squeeze`
  7526. """
  7527. ...
  7528. @overload
  7529. def squeeze_(self, dim: _int) -> Tensor:
  7530. r"""
  7531. squeeze_(dim=None) -> Tensor
  7532. In-place version of :meth:`~Tensor.squeeze`
  7533. """
  7534. ...
  7535. @overload
  7536. def squeeze_(self, dim: _size) -> Tensor:
  7537. r"""
  7538. squeeze_(dim=None) -> Tensor
  7539. In-place version of :meth:`~Tensor.squeeze`
  7540. """
  7541. ...
  7542. @overload
  7543. def squeeze_(self, *dim: _int) -> Tensor:
  7544. r"""
  7545. squeeze_(dim=None) -> Tensor
  7546. In-place version of :meth:`~Tensor.squeeze`
  7547. """
  7548. ...
  7549. @overload
  7550. def squeeze_(self, dim: Union[str, ellipsis, None]) -> Tensor:
  7551. r"""
  7552. squeeze_(dim=None) -> Tensor
  7553. In-place version of :meth:`~Tensor.squeeze`
  7554. """
  7555. ...
  7556. def sspaddmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
  7557. r"""
  7558. sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
  7559. See :func:`torch.sspaddmm`
  7560. """
  7561. ...
  7562. @overload
  7563. def std(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
  7564. r"""
  7565. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  7566. See :func:`torch.std`
  7567. """
  7568. ...
  7569. @overload
  7570. def std(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
  7571. r"""
  7572. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  7573. See :func:`torch.std`
  7574. """
  7575. ...
  7576. @overload
  7577. def std(self, unbiased: _bool = True) -> Tensor:
  7578. r"""
  7579. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  7580. See :func:`torch.std`
  7581. """
  7582. ...
  7583. @overload
  7584. def std(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
  7585. r"""
  7586. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  7587. See :func:`torch.std`
  7588. """
  7589. ...
  7590. @overload
  7591. def std(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
  7592. r"""
  7593. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  7594. See :func:`torch.std`
  7595. """
  7596. ...
  7597. def untyped_storage(self) -> UntypedStorage: ...
  7598. def storage_offset(self) -> _int:
  7599. r"""
  7600. storage_offset() -> int
  7601. Returns :attr:`self` tensor's offset in the underlying storage in terms of
  7602. number of storage elements (not bytes).
  7603. Example::
  7604. >>> x = torch.tensor([1, 2, 3, 4, 5])
  7605. >>> x.storage_offset()
  7606. 0
  7607. >>> x[3:].storage_offset()
  7608. 3
  7609. """
  7610. ...
  7611. def storage_type(self) -> Storage: ...
  7612. @overload
  7613. def stride(self, dim: None = None) -> Tuple[_int, ...]:
  7614. r"""
  7615. stride(dim) -> tuple or int
  7616. Returns the stride of :attr:`self` tensor.
  7617. Stride is the jump necessary to go from one element to the next one in the
  7618. specified dimension :attr:`dim`. A tuple of all strides is returned when no
  7619. argument is passed in. Otherwise, an integer value is returned as the stride in
  7620. the particular dimension :attr:`dim`.
  7621. Args:
  7622. dim (int, optional): the desired dimension in which stride is required
  7623. Example::
  7624. >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
  7625. >>> x.stride()
  7626. (5, 1)
  7627. >>> x.stride(0)
  7628. 5
  7629. >>> x.stride(-1)
  7630. 1
  7631. """
  7632. ...
  7633. @overload
  7634. def stride(self, dim: _int) -> _int:
  7635. r"""
  7636. stride(dim) -> tuple or int
  7637. Returns the stride of :attr:`self` tensor.
  7638. Stride is the jump necessary to go from one element to the next one in the
  7639. specified dimension :attr:`dim`. A tuple of all strides is returned when no
  7640. argument is passed in. Otherwise, an integer value is returned as the stride in
  7641. the particular dimension :attr:`dim`.
  7642. Args:
  7643. dim (int, optional): the desired dimension in which stride is required
  7644. Example::
  7645. >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
  7646. >>> x.stride()
  7647. (5, 1)
  7648. >>> x.stride(0)
  7649. 5
  7650. >>> x.stride(-1)
  7651. 1
  7652. """
  7653. ...
  7654. def sub(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
  7655. r"""
  7656. sub(other, *, alpha=1) -> Tensor
  7657. See :func:`torch.sub`.
  7658. """
  7659. ...
  7660. def sub_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1) -> Tensor:
  7661. r"""
  7662. sub_(other, *, alpha=1) -> Tensor
  7663. In-place version of :meth:`~Tensor.sub`
  7664. """
  7665. ...
  7666. @overload
  7667. def subtract(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
  7668. r"""
  7669. subtract(other, *, alpha=1) -> Tensor
  7670. See :func:`torch.subtract`.
  7671. """
  7672. ...
  7673. @overload
  7674. def subtract(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
  7675. r"""
  7676. subtract(other, *, alpha=1) -> Tensor
  7677. See :func:`torch.subtract`.
  7678. """
  7679. ...
  7680. @overload
  7681. def subtract_(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
  7682. r"""
  7683. subtract_(other, *, alpha=1) -> Tensor
  7684. In-place version of :meth:`~Tensor.subtract`.
  7685. """
  7686. ...
  7687. @overload
  7688. def subtract_(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
  7689. r"""
  7690. subtract_(other, *, alpha=1) -> Tensor
  7691. In-place version of :meth:`~Tensor.subtract`.
  7692. """
  7693. ...
  7694. @overload
  7695. def sum(self, *, dtype: Optional[_dtype] = None) -> Tensor:
  7696. r"""
  7697. sum(dim=None, keepdim=False, dtype=None) -> Tensor
  7698. See :func:`torch.sum`
  7699. """
  7700. ...
  7701. @overload
  7702. def sum(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  7703. r"""
  7704. sum(dim=None, keepdim=False, dtype=None) -> Tensor
  7705. See :func:`torch.sum`
  7706. """
  7707. ...
  7708. @overload
  7709. def sum(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
  7710. r"""
  7711. sum(dim=None, keepdim=False, dtype=None) -> Tensor
  7712. See :func:`torch.sum`
  7713. """
  7714. ...
  7715. @overload
  7716. def sum_to_size(self, size: Sequence[Union[_int, SymInt]]) -> Tensor:
  7717. r"""
  7718. sum_to_size(*size) -> Tensor
  7719. Sum ``this`` tensor to :attr:`size`.
  7720. :attr:`size` must be broadcastable to ``this`` tensor size.
  7721. Args:
  7722. size (int...): a sequence of integers defining the shape of the output tensor.
  7723. """
  7724. ...
  7725. @overload
  7726. def sum_to_size(self, *size: _int) -> Tensor:
  7727. r"""
  7728. sum_to_size(*size) -> Tensor
  7729. Sum ``this`` tensor to :attr:`size`.
  7730. :attr:`size` must be broadcastable to ``this`` tensor size.
  7731. Args:
  7732. size (int...): a sequence of integers defining the shape of the output tensor.
  7733. """
  7734. ...
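# A minimal example of reducing to a broadcast-compatible shape:
#   >>> torch.ones(2, 3).sum_to_size(1, 3)
#   tensor([[2., 2., 2.]])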
  7735. def svd(self, some: _bool = True, compute_uv: _bool = True) -> torch.return_types.svd:
  7736. r"""
  7737. svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
  7738. See :func:`torch.svd`
  7739. """
  7740. ...
  7741. def swapaxes(self, axis0: _int, axis1: _int) -> Tensor:
  7742. r"""
  7743. swapaxes(axis0, axis1) -> Tensor
  7744. See :func:`torch.swapaxes`
  7745. """
  7746. ...
  7747. def swapaxes_(self, axis0: _int, axis1: _int) -> Tensor:
  7748. r"""
  7749. swapaxes_(axis0, axis1) -> Tensor
  7750. In-place version of :meth:`~Tensor.swapaxes`
  7751. """
  7752. ...
  7753. def swapdims(self, dim0: _int, dim1: _int) -> Tensor:
  7754. r"""
  7755. swapdims(dim0, dim1) -> Tensor
  7756. See :func:`torch.swapdims`
  7757. """
  7758. ...
  7759. def swapdims_(self, dim0: _int, dim1: _int) -> Tensor:
  7760. r"""
  7761. swapdims_(dim0, dim1) -> Tensor
  7762. In-place version of :meth:`~Tensor.swapdims`
  7763. """
  7764. ...
  7765. def t(self) -> Tensor:
  7766. r"""
  7767. t() -> Tensor
  7768. See :func:`torch.t`
  7769. """
  7770. ...
  7771. def t_(self) -> Tensor:
  7772. r"""
  7773. t_() -> Tensor
  7774. In-place version of :meth:`~Tensor.t`
  7775. """
  7776. ...
  7777. def take(self, index: Tensor) -> Tensor:
  7778. r"""
  7779. take(indices) -> Tensor
  7780. See :func:`torch.take`
  7781. """
  7782. ...
  7783. def take_along_dim(self, indices: Tensor, dim: Optional[_int] = None) -> Tensor:
  7784. r"""
  7785. take_along_dim(indices, dim) -> Tensor
  7786. See :func:`torch.take_along_dim`
  7787. """
  7788. ...
  7789. def tan(self) -> Tensor:
  7790. r"""
  7791. tan() -> Tensor
  7792. See :func:`torch.tan`
  7793. """
  7794. ...
  7795. def tan_(self) -> Tensor:
  7796. r"""
  7797. tan_() -> Tensor
  7798. In-place version of :meth:`~Tensor.tan`
  7799. """
  7800. ...
  7801. def tanh(self) -> Tensor:
  7802. r"""
  7803. tanh() -> Tensor
  7804. See :func:`torch.tanh`
  7805. """
  7806. ...
  7807. def tanh_(self) -> Tensor:
  7808. r"""
  7809. tanh_() -> Tensor
  7810. In-place version of :meth:`~Tensor.tanh`
  7811. """
  7812. ...
  7813. @overload
  7814. def tensor_split(self, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]:
  7815. r"""
  7816. tensor_split(indices_or_sections, dim=0) -> List of Tensors
  7817. See :func:`torch.tensor_split`
  7818. """
  7819. ...
  7820. @overload
  7821. def tensor_split(self, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
  7822. r"""
  7823. tensor_split(indices_or_sections, dim=0) -> List of Tensors
  7824. See :func:`torch.tensor_split`
  7825. """
  7826. ...
  7827. @overload
  7828. def tensor_split(self, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
  7829. r"""
  7830. tensor_split(indices_or_sections, dim=0) -> List of Tensors
  7831. See :func:`torch.tensor_split`
  7832. """
  7833. ...
  7834. @overload
  7835. def tile(self, dims: Sequence[Union[_int, SymInt]]) -> Tensor:
  7836. r"""
  7837. tile(dims) -> Tensor
  7838. See :func:`torch.tile`
  7839. """
  7840. ...
  7841. @overload
  7842. def tile(self, *dims: _int) -> Tensor:
  7843. r"""
  7844. tile(dims) -> Tensor
  7845. See :func:`torch.tile`
  7846. """
  7847. ...
  7848. @overload
  7849. def to(self, dtype: _dtype, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor:
  7850. r"""
  7851. to(*args, **kwargs) -> Tensor
  7852. Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
  7853. inferred from the arguments of ``self.to(*args, **kwargs)``.
  7854. .. note::
  7855. If the ``self`` Tensor already
  7856. has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
  7857. Otherwise, the returned tensor is a copy of ``self`` with the desired
  7858. :class:`torch.dtype` and :class:`torch.device`.
  7859. Here are the ways to call ``to``:
  7860. .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  7861. :noindex:
  7862. Returns a Tensor with the specified :attr:`dtype`
  7863. Args:
  7864. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7865. returned Tensor. Default: ``torch.preserve_format``.
  7866. .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  7867. :noindex:
  7868. Returns a Tensor with the specified :attr:`device` and (optional)
  7869. :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
7870. When :attr:`non_blocking` is set, tries to convert asynchronously with respect to
  7871. the host if possible, e.g., converting a CPU Tensor with pinned memory to a
  7872. CUDA Tensor.
  7873. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  7874. already matches the desired conversion.
  7875. Args:
  7876. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7877. returned Tensor. Default: ``torch.preserve_format``.
  7878. .. method:: to(other, non_blocking=False, copy=False) -> Tensor
  7879. :noindex:
  7880. Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
7881. the Tensor :attr:`other`. When :attr:`non_blocking` is set, tries to convert
  7882. asynchronously with respect to the host if possible, e.g., converting a CPU
  7883. Tensor with pinned memory to a CUDA Tensor.
  7884. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  7885. already matches the desired conversion.
  7886. Example::
  7887. >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
  7888. >>> tensor.to(torch.float64)
  7889. tensor([[-0.5044, 0.0005],
  7890. [ 0.3310, -0.0584]], dtype=torch.float64)
  7891. >>> cuda0 = torch.device('cuda:0')
  7892. >>> tensor.to(cuda0)
  7893. tensor([[-0.5044, 0.0005],
  7894. [ 0.3310, -0.0584]], device='cuda:0')
  7895. >>> tensor.to(cuda0, dtype=torch.float64)
  7896. tensor([[-0.5044, 0.0005],
  7897. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  7898. >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
  7899. >>> tensor.to(other, non_blocking=True)
  7900. tensor([[-0.5044, 0.0005],
  7901. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  7902. """
  7903. ...
  7904. @overload
  7905. def to(self, device: Optional[DeviceLikeType] = None, dtype: Optional[_dtype] = None, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor:
  7906. r"""
  7907. to(*args, **kwargs) -> Tensor
  7908. Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
  7909. inferred from the arguments of ``self.to(*args, **kwargs)``.
  7910. .. note::
  7911. If the ``self`` Tensor already
  7912. has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
  7913. Otherwise, the returned tensor is a copy of ``self`` with the desired
  7914. :class:`torch.dtype` and :class:`torch.device`.
  7915. Here are the ways to call ``to``:
  7916. .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  7917. :noindex:
  7918. Returns a Tensor with the specified :attr:`dtype`
  7919. Args:
  7920. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7921. returned Tensor. Default: ``torch.preserve_format``.
  7922. .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  7923. :noindex:
  7924. Returns a Tensor with the specified :attr:`device` and (optional)
  7925. :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
7926. When :attr:`non_blocking` is set, tries to convert asynchronously with respect to
  7927. the host if possible, e.g., converting a CPU Tensor with pinned memory to a
  7928. CUDA Tensor.
  7929. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  7930. already matches the desired conversion.
  7931. Args:
  7932. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7933. returned Tensor. Default: ``torch.preserve_format``.
  7934. .. method:: to(other, non_blocking=False, copy=False) -> Tensor
  7935. :noindex:
  7936. Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
7937. the Tensor :attr:`other`. When :attr:`non_blocking` is set, tries to convert
  7938. asynchronously with respect to the host if possible, e.g., converting a CPU
  7939. Tensor with pinned memory to a CUDA Tensor.
  7940. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  7941. already matches the desired conversion.
  7942. Example::
  7943. >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
  7944. >>> tensor.to(torch.float64)
  7945. tensor([[-0.5044, 0.0005],
  7946. [ 0.3310, -0.0584]], dtype=torch.float64)
  7947. >>> cuda0 = torch.device('cuda:0')
  7948. >>> tensor.to(cuda0)
  7949. tensor([[-0.5044, 0.0005],
  7950. [ 0.3310, -0.0584]], device='cuda:0')
  7951. >>> tensor.to(cuda0, dtype=torch.float64)
  7952. tensor([[-0.5044, 0.0005],
  7953. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  7954. >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
  7955. >>> tensor.to(other, non_blocking=True)
  7956. tensor([[-0.5044, 0.0005],
  7957. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  7958. """
  7959. ...
  7960. @overload
  7961. def to(self, other: Tensor, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor:
  7962. r"""
  7963. to(*args, **kwargs) -> Tensor
  7964. Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
  7965. inferred from the arguments of ``self.to(*args, **kwargs)``.
  7966. .. note::
  7967. If the ``self`` Tensor already
  7968. has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
  7969. Otherwise, the returned tensor is a copy of ``self`` with the desired
  7970. :class:`torch.dtype` and :class:`torch.device`.
  7971. Here are the ways to call ``to``:
  7972. .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  7973. :noindex:
  7974. Returns a Tensor with the specified :attr:`dtype`
  7975. Args:
  7976. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7977. returned Tensor. Default: ``torch.preserve_format``.
  7978. .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  7979. :noindex:
  7980. Returns a Tensor with the specified :attr:`device` and (optional)
  7981. :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
7982. When :attr:`non_blocking` is set, tries to convert asynchronously with respect to
  7983. the host if possible, e.g., converting a CPU Tensor with pinned memory to a
  7984. CUDA Tensor.
  7985. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  7986. already matches the desired conversion.
  7987. Args:
  7988. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  7989. returned Tensor. Default: ``torch.preserve_format``.
  7990. .. method:: to(other, non_blocking=False, copy=False) -> Tensor
  7991. :noindex:
  7992. Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
7993. the Tensor :attr:`other`. When :attr:`non_blocking` is set, tries to convert
  7994. asynchronously with respect to the host if possible, e.g., converting a CPU
  7995. Tensor with pinned memory to a CUDA Tensor.
  7996. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  7997. already matches the desired conversion.
  7998. Example::
  7999. >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
  8000. >>> tensor.to(torch.float64)
  8001. tensor([[-0.5044, 0.0005],
  8002. [ 0.3310, -0.0584]], dtype=torch.float64)
  8003. >>> cuda0 = torch.device('cuda:0')
  8004. >>> tensor.to(cuda0)
  8005. tensor([[-0.5044, 0.0005],
  8006. [ 0.3310, -0.0584]], device='cuda:0')
  8007. >>> tensor.to(cuda0, dtype=torch.float64)
  8008. tensor([[-0.5044, 0.0005],
  8009. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  8010. >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
  8011. >>> tensor.to(other, non_blocking=True)
  8012. tensor([[-0.5044, 0.0005],
  8013. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  8014. """
  8015. ...
  8016. def to_dense(self, dtype: Optional[_dtype] = None, *, masked_grad: Optional[_bool] = None) -> Tensor:
  8017. r"""
  8018. to_dense(dtype=None, *, masked_grad=True) -> Tensor
  8019. Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
  8020. Keyword args:
8021. dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor. Default: ``None``, i.e. the dtype of :attr:`self` is kept.
  8022. masked_grad (bool, optional): If set to ``True`` (default) and
  8023. :attr:`self` has a sparse layout then the backward of
  8024. :meth:`to_dense` returns ``grad.sparse_mask(self)``.
  8025. Example::
  8026. >>> s = torch.sparse_coo_tensor(
  8027. ... torch.tensor([[1, 1],
  8028. ... [0, 2]]),
  8029. ... torch.tensor([9, 10]),
  8030. ... size=(3, 3))
  8031. >>> s.to_dense()
  8032. tensor([[ 0, 0, 0],
  8033. [ 9, 0, 10],
  8034. [ 0, 0, 0]])
  8035. """
  8036. ...
  8037. def to_mkldnn(self, dtype: Optional[_dtype] = None) -> Tensor:
  8038. r"""
  8039. to_mkldnn() -> Tensor
  8040. Returns a copy of the tensor in ``torch.mkldnn`` layout.
  8041. """
  8042. ...
  8043. def to_padded_tensor(self, padding: _float, output_size: Optional[Sequence[Union[_int, SymInt]]] = None) -> Tensor:
  8044. r"""
  8045. to_padded_tensor(padding, output_size=None) -> Tensor
  8046. See :func:`to_padded_tensor`
  8047. """
  8048. ...
  8049. @overload
  8050. def to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor:
  8051. r"""
  8052. to_sparse(sparseDims) -> Tensor
  8053. Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
  8054. :ref:`coordinate format <sparse-coo-docs>`.
  8055. Args:
  8056. sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
  8057. Example::
  8058. >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
  8059. >>> d
  8060. tensor([[ 0, 0, 0],
  8061. [ 9, 0, 10],
  8062. [ 0, 0, 0]])
  8063. >>> d.to_sparse()
  8064. tensor(indices=tensor([[1, 1],
  8065. [0, 2]]),
  8066. values=tensor([ 9, 10]),
  8067. size=(3, 3), nnz=2, layout=torch.sparse_coo)
  8068. >>> d.to_sparse(1)
  8069. tensor(indices=tensor([[1]]),
  8070. values=tensor([[ 9, 0, 10]]),
  8071. size=(3, 3), nnz=1, layout=torch.sparse_coo)
  8072. .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
  8073. :noindex:
  8074. Returns a sparse tensor with the specified layout and blocksize. If
  8075. the :attr:`self` is strided, the number of dense dimensions could be
  8076. specified, and a hybrid sparse tensor will be created, with
  8077. `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
8078. dimensions.
  8079. .. note:: If the :attr:`self` layout and blocksize parameters match
  8080. with the specified layout and blocksize, return
  8081. :attr:`self`. Otherwise, return a sparse tensor copy of
  8082. :attr:`self`.
  8083. Args:
  8084. layout (:class:`torch.layout`, optional): The desired sparse
  8085. layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
  8086. ``torch.sparse_csc``, ``torch.sparse_bsr``, or
  8087. ``torch.sparse_bsc``. Default: if ``None``,
  8088. ``torch.sparse_coo``.
  8089. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  8090. of the resulting BSR or BSC tensor. For other layouts,
  8091. specifying the block size that is not ``None`` will result in a
  8092. RuntimeError exception. A block size must be a tuple of length
  8093. two such that its items evenly divide the two sparse dimensions.
  8094. dense_dim (int, optional): Number of dense dimensions of the
  8095. resulting CSR, CSC, BSR or BSC tensor. This argument should be
  8096. used only if :attr:`self` is a strided tensor, and must be a
  8097. value between 0 and dimension of :attr:`self` tensor minus two.
  8098. Example::
  8099. >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
  8100. >>> x.to_sparse(layout=torch.sparse_coo)
  8101. tensor(indices=tensor([[0, 2, 2],
  8102. [0, 0, 1]]),
  8103. values=tensor([1, 2, 3]),
  8104. size=(3, 2), nnz=3, layout=torch.sparse_coo)
  8105. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
  8106. tensor(crow_indices=tensor([0, 1, 1, 2]),
  8107. col_indices=tensor([0, 0]),
  8108. values=tensor([[[1, 0]],
  8109. [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
  8110. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
  8111. RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
  8112. >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
  8113. RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
  8114. >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
  8115. >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
  8116. tensor(crow_indices=tensor([0, 1, 1, 3]),
  8117. col_indices=tensor([0, 0, 1]),
  8118. values=tensor([[1],
  8119. [2],
  8120. [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
  8121. """
  8122. ...
  8123. @overload
  8124. def to_sparse(self, sparse_dim: _int) -> Tensor:
  8125. r"""
  8126. to_sparse(sparseDims) -> Tensor
  8127. Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
  8128. :ref:`coordinate format <sparse-coo-docs>`.
  8129. Args:
  8130. sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
  8131. Example::
  8132. >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
  8133. >>> d
  8134. tensor([[ 0, 0, 0],
  8135. [ 9, 0, 10],
  8136. [ 0, 0, 0]])
  8137. >>> d.to_sparse()
  8138. tensor(indices=tensor([[1, 1],
  8139. [0, 2]]),
  8140. values=tensor([ 9, 10]),
  8141. size=(3, 3), nnz=2, layout=torch.sparse_coo)
  8142. >>> d.to_sparse(1)
  8143. tensor(indices=tensor([[1]]),
  8144. values=tensor([[ 9, 0, 10]]),
  8145. size=(3, 3), nnz=1, layout=torch.sparse_coo)
  8146. .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
  8147. :noindex:
  8148. Returns a sparse tensor with the specified layout and blocksize. If
  8149. the :attr:`self` is strided, the number of dense dimensions could be
  8150. specified, and a hybrid sparse tensor will be created, with
  8151. `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
8152. dimensions.
  8153. .. note:: If the :attr:`self` layout and blocksize parameters match
  8154. with the specified layout and blocksize, return
  8155. :attr:`self`. Otherwise, return a sparse tensor copy of
  8156. :attr:`self`.
  8157. Args:
  8158. layout (:class:`torch.layout`, optional): The desired sparse
  8159. layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
  8160. ``torch.sparse_csc``, ``torch.sparse_bsr``, or
  8161. ``torch.sparse_bsc``. Default: if ``None``,
  8162. ``torch.sparse_coo``.
  8163. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  8164. of the resulting BSR or BSC tensor. For other layouts,
  8165. specifying the block size that is not ``None`` will result in a
  8166. RuntimeError exception. A block size must be a tuple of length
  8167. two such that its items evenly divide the two sparse dimensions.
  8168. dense_dim (int, optional): Number of dense dimensions of the
  8169. resulting CSR, CSC, BSR or BSC tensor. This argument should be
  8170. used only if :attr:`self` is a strided tensor, and must be a
  8171. value between 0 and dimension of :attr:`self` tensor minus two.
  8172. Example::
  8173. >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
  8174. >>> x.to_sparse(layout=torch.sparse_coo)
  8175. tensor(indices=tensor([[0, 2, 2],
  8176. [0, 0, 1]]),
  8177. values=tensor([1, 2, 3]),
  8178. size=(3, 2), nnz=3, layout=torch.sparse_coo)
  8179. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
  8180. tensor(crow_indices=tensor([0, 1, 1, 2]),
  8181. col_indices=tensor([0, 0]),
  8182. values=tensor([[[1, 0]],
  8183. [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
  8184. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
  8185. RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
  8186. >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
  8187. RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
  8188. >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
  8189. >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
  8190. tensor(crow_indices=tensor([0, 1, 1, 3]),
  8191. col_indices=tensor([0, 0, 1]),
  8192. values=tensor([[1],
  8193. [2],
  8194. [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
  8195. """
  8196. ...
  8197. def to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor:
  8198. r"""
  8199. to_sparse_bsc(blocksize, dense_dim) -> Tensor
  8200. Convert a tensor to a block sparse column (BSC) storage format of
  8201. given blocksize. If the :attr:`self` is strided, then the number of
  8202. dense dimensions could be specified, and a hybrid BSC tensor will be
  8203. created, with `dense_dim` dense dimensions and `self.dim() - 2 -
8204. dense_dim` batch dimensions.
  8205. Args:
  8206. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  8207. of the resulting BSC tensor. A block size must be a tuple of
  8208. length two such that its items evenly divide the two sparse
  8209. dimensions.
  8210. dense_dim (int, optional): Number of dense dimensions of the
  8211. resulting BSC tensor. This argument should be used only if
  8212. :attr:`self` is a strided tensor, and must be a value between 0
  8213. and dimension of :attr:`self` tensor minus two.
  8214. Example::
  8215. >>> dense = torch.randn(10, 10)
  8216. >>> sparse = dense.to_sparse_csr()
  8217. >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
  8218. >>> sparse_bsc.row_indices()
  8219. tensor([0, 1, 0, 1])
  8220. >>> dense = torch.zeros(4, 3, 1)
  8221. >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
  8222. >>> dense.to_sparse_bsc((2, 1), 1)
  8223. tensor(ccol_indices=tensor([0, 1, 2, 3]),
  8224. row_indices=tensor([0, 1, 0]),
  8225. values=tensor([[[[1.]],
  8226. [[1.]]],
  8227. [[[1.]],
  8228. [[1.]]],
  8229. [[[1.]],
  8230. [[1.]]]]), size=(4, 3, 1), nnz=3,
  8231. layout=torch.sparse_bsc)
  8232. """
  8233. ...
  8234. def to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor:
  8235. r"""
  8236. to_sparse_bsr(blocksize, dense_dim) -> Tensor
  8237. Convert a tensor to a block sparse row (BSR) storage format of given
  8238. blocksize. If the :attr:`self` is strided, then the number of dense
  8239. dimensions could be specified, and a hybrid BSR tensor will be
  8240. created, with `dense_dim` dense dimensions and `self.dim() - 2 -
8241. dense_dim` batch dimensions.
  8242. Args:
  8243. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  8244. of the resulting BSR tensor. A block size must be a tuple of
  8245. length two such that its items evenly divide the two sparse
  8246. dimensions.
  8247. dense_dim (int, optional): Number of dense dimensions of the
  8248. resulting BSR tensor. This argument should be used only if
  8249. :attr:`self` is a strided tensor, and must be a value between 0
  8250. and dimension of :attr:`self` tensor minus two.
  8251. Example::
  8252. >>> dense = torch.randn(10, 10)
  8253. >>> sparse = dense.to_sparse_csr()
  8254. >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
  8255. >>> sparse_bsr.col_indices()
  8256. tensor([0, 1, 0, 1])
  8257. >>> dense = torch.zeros(4, 3, 1)
  8258. >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
  8259. >>> dense.to_sparse_bsr((2, 1), 1)
  8260. tensor(crow_indices=tensor([0, 2, 3]),
  8261. col_indices=tensor([0, 2, 1]),
  8262. values=tensor([[[[1.]],
  8263. [[1.]]],
  8264. [[[1.]],
  8265. [[1.]]],
  8266. [[[1.]],
  8267. [[1.]]]]), size=(4, 3, 1), nnz=3,
  8268. layout=torch.sparse_bsr)
  8269. """
  8270. ...
  8271. def to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor:
  8272. r"""
8273. to_sparse_csc(dense_dim=None) -> Tensor
  8274. Convert a tensor to compressed column storage (CSC) format. Except
  8275. for strided tensors, only works with 2D tensors. If the :attr:`self`
  8276. is strided, then the number of dense dimensions could be specified,
  8277. and a hybrid CSC tensor will be created, with `dense_dim` dense
8278. dimensions and `self.dim() - 2 - dense_dim` batch dimensions.
  8279. Args:
  8280. dense_dim (int, optional): Number of dense dimensions of the
  8281. resulting CSC tensor. This argument should be used only if
  8282. :attr:`self` is a strided tensor, and must be a value between 0
  8283. and dimension of :attr:`self` tensor minus two.
  8284. Example::
  8285. >>> dense = torch.randn(5, 5)
  8286. >>> sparse = dense.to_sparse_csc()
  8287. >>> sparse._nnz()
  8288. 25
  8289. >>> dense = torch.zeros(3, 3, 1, 1)
  8290. >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
  8291. >>> dense.to_sparse_csc(dense_dim=2)
  8292. tensor(ccol_indices=tensor([0, 1, 2, 3]),
  8293. row_indices=tensor([0, 2, 1]),
  8294. values=tensor([[[1.]],
  8295. [[1.]],
  8296. [[1.]]]), size=(3, 3, 1, 1), nnz=3,
  8297. layout=torch.sparse_csc)
  8298. """
  8299. ...
  8300. def to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor:
  8301. r"""
  8302. to_sparse_csr(dense_dim=None) -> Tensor
  8303. Convert a tensor to compressed row storage format (CSR). Except for
  8304. strided tensors, only works with 2D tensors. If the :attr:`self` is
  8305. strided, then the number of dense dimensions could be specified, and a
  8306. hybrid CSR tensor will be created, with `dense_dim` dense dimensions
8307. and `self.dim() - 2 - dense_dim` batch dimensions.
  8308. Args:
  8309. dense_dim (int, optional): Number of dense dimensions of the
  8310. resulting CSR tensor. This argument should be used only if
  8311. :attr:`self` is a strided tensor, and must be a value between 0
  8312. and dimension of :attr:`self` tensor minus two.
  8313. Example::
  8314. >>> dense = torch.randn(5, 5)
  8315. >>> sparse = dense.to_sparse_csr()
  8316. >>> sparse._nnz()
  8317. 25
  8318. >>> dense = torch.zeros(3, 3, 1, 1)
  8319. >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
  8320. >>> dense.to_sparse_csr(dense_dim=2)
  8321. tensor(crow_indices=tensor([0, 1, 2, 3]),
  8322. col_indices=tensor([0, 2, 1]),
  8323. values=tensor([[[1.]],
  8324. [[1.]],
  8325. [[1.]]]), size=(3, 3, 1, 1), nnz=3,
  8326. layout=torch.sparse_csr)
  8327. """
  8328. ...
  8329. def tolist(self) -> List:
  8330. r"""
  8331. tolist() -> list or number
  8332. Returns the tensor as a (nested) list. For scalars, a standard
  8333. Python number is returned, just like with :meth:`~Tensor.item`.
  8334. Tensors are automatically moved to the CPU first if necessary.
  8335. This operation is not differentiable.
  8336. Examples::
  8337. >>> a = torch.randn(2, 2)
  8338. >>> a.tolist()
  8339. [[0.012766935862600803, 0.5415473580360413],
  8340. [-0.08909505605697632, 0.7729271650314331]]
  8341. >>> a[0,0].tolist()
  8342. 0.012766935862600803
  8343. """
  8344. ...
  8345. def topk(self, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True) -> torch.return_types.topk:
  8346. r"""
  8347. topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
  8348. See :func:`torch.topk`
  8349. """
  8350. ...
  8351. def trace(self) -> Tensor:
  8352. r"""
  8353. trace() -> Tensor
  8354. See :func:`torch.trace`
  8355. """
  8356. ...
  8357. @overload
  8358. def transpose(self, dim0: _int, dim1: _int) -> Tensor:
  8359. r"""
  8360. transpose(dim0, dim1) -> Tensor
  8361. See :func:`torch.transpose`
  8362. """
  8363. ...
  8364. @overload
  8365. def transpose(self, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor:
  8366. r"""
  8367. transpose(dim0, dim1) -> Tensor
  8368. See :func:`torch.transpose`
  8369. """
  8370. ...
  8371. def transpose_(self, dim0: _int, dim1: _int) -> Tensor:
  8372. r"""
  8373. transpose_(dim0, dim1) -> Tensor
  8374. In-place version of :meth:`~Tensor.transpose`
  8375. """
  8376. ...
  8377. def triangular_solve(self, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False) -> torch.return_types.triangular_solve:
  8378. r"""
  8379. triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
  8380. See :func:`torch.triangular_solve`
  8381. """
  8382. ...
  8383. def tril(self, diagonal: _int = 0) -> Tensor:
  8384. r"""
  8385. tril(diagonal=0) -> Tensor
  8386. See :func:`torch.tril`
  8387. """
  8388. ...
  8389. def tril_(self, diagonal: _int = 0) -> Tensor:
  8390. r"""
  8391. tril_(diagonal=0) -> Tensor
  8392. In-place version of :meth:`~Tensor.tril`
  8393. """
  8394. ...
  8395. def triu(self, diagonal: _int = 0) -> Tensor:
  8396. r"""
  8397. triu(diagonal=0) -> Tensor
  8398. See :func:`torch.triu`
  8399. """
  8400. ...
  8401. def triu_(self, diagonal: _int = 0) -> Tensor:
  8402. r"""
  8403. triu_(diagonal=0) -> Tensor
  8404. In-place version of :meth:`~Tensor.triu`
  8405. """
  8406. ...
  8407. def true_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor:
  8408. r"""
  8409. true_divide(value) -> Tensor
  8410. See :func:`torch.true_divide`
  8411. """
  8412. ...
  8413. def true_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor:
  8414. r"""
  8415. true_divide_(value) -> Tensor
8416. In-place version of :meth:`~Tensor.true_divide`
  8417. """
  8418. ...
  8419. def trunc(self) -> Tensor:
  8420. r"""
  8421. trunc() -> Tensor
  8422. See :func:`torch.trunc`
  8423. """
  8424. ...
  8425. def trunc_(self) -> Tensor:
  8426. r"""
  8427. trunc_() -> Tensor
  8428. In-place version of :meth:`~Tensor.trunc`
  8429. """
  8430. ...
  8431. @overload
  8432. def type(self, dtype: None = None, non_blocking: _bool = False) -> str:
  8433. r"""
  8434. type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
  8435. Returns the type if `dtype` is not provided, else casts this object to
  8436. the specified type.
  8437. If this is already of the correct type, no copy is performed and the
  8438. original object is returned.
  8439. Args:
  8440. dtype (dtype or string): The desired type
  8441. non_blocking (bool): If ``True``, and the source is in pinned memory
  8442. and destination is on the GPU or vice versa, the copy is performed
  8443. asynchronously with respect to the host. Otherwise, the argument
  8444. has no effect.
  8445. **kwargs: For compatibility, may contain the key ``async`` in place of
  8446. the ``non_blocking`` argument. The ``async`` arg is deprecated.
  8447. """
  8448. ...
  8449. @overload
  8450. def type(self, dtype: Union[str, _dtype], non_blocking: _bool = False) -> Tensor:
  8451. r"""
  8452. type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
  8453. Returns the type if `dtype` is not provided, else casts this object to
  8454. the specified type.
  8455. If this is already of the correct type, no copy is performed and the
  8456. original object is returned.
  8457. Args:
  8458. dtype (dtype or string): The desired type
  8459. non_blocking (bool): If ``True``, and the source is in pinned memory
  8460. and destination is on the GPU or vice versa, the copy is performed
  8461. asynchronously with respect to the host. Otherwise, the argument
  8462. has no effect.
  8463. **kwargs: For compatibility, may contain the key ``async`` in place of
  8464. the ``non_blocking`` argument. The ``async`` arg is deprecated.
  8465. """
  8466. ...
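# A short illustration of the two call forms documented above (type string
# query vs. cast):
#   >>> t = torch.zeros(2)
#   >>> t.type()
#   'torch.FloatTensor'
#   >>> t.type(torch.int64).dtype
#   torch.int64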
  8467. def type_as(self, other: Tensor) -> Tensor:
  8468. r"""
  8469. type_as(tensor) -> Tensor
  8470. Returns this tensor cast to the type of the given tensor.
  8471. This is a no-op if the tensor is already of the correct type. This is
  8472. equivalent to ``self.type(tensor.type())``
  8473. Args:
  8474. tensor (Tensor): the tensor which has the desired type
  8475. """
  8476. ...
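# A minimal sketch of the documented ``self.type(tensor.type())`` equivalence:
#   >>> ref = torch.zeros(1, dtype=torch.float64)
#   >>> torch.zeros(2).type_as(ref).dtype
#   torch.float64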
  8477. @overload
  8478. def unbind(self, dim: _int = 0) -> Tuple[Tensor, ...]:
  8479. r"""
  8480. unbind(dim=0) -> seq
  8481. See :func:`torch.unbind`
  8482. """
  8483. ...
  8484. @overload
  8485. def unbind(self, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]:
  8486. r"""
  8487. unbind(dim=0) -> seq
  8488. See :func:`torch.unbind`
  8489. """
  8490. ...
  8491. @overload
  8492. def unflatten(self, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
  8493. @overload
  8494. def unflatten(self, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: ...
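# The unflatten overloads above carry no docstring here; a minimal sketch of
# splitting one dimension into several (the sizes must multiply to the
# original extent):
#   >>> torch.arange(6).unflatten(0, (2, 3)).shape
#   torch.Size([2, 3])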
  8495. def unfold(self, dimension: _int, size: _int, step: _int) -> Tensor:
  8496. r"""
  8497. unfold(dimension, size, step) -> Tensor
  8498. Returns a view of the original tensor which contains all slices of size :attr:`size` from
  8499. :attr:`self` tensor in the dimension :attr:`dimension`.
  8500. Step between two slices is given by :attr:`step`.
  8501. If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
  8502. dimension :attr:`dimension` in the returned tensor will be
  8503. `(sizedim - size) / step + 1`.
  8504. An additional dimension of size :attr:`size` is appended in the returned tensor.
  8505. Args:
  8506. dimension (int): dimension in which unfolding happens
  8507. size (int): the size of each slice that is unfolded
  8508. step (int): the step between each slice
  8509. Example::
  8510. >>> x = torch.arange(1., 8)
  8511. >>> x
  8512. tensor([ 1., 2., 3., 4., 5., 6., 7.])
  8513. >>> x.unfold(0, 2, 1)
  8514. tensor([[ 1., 2.],
  8515. [ 2., 3.],
  8516. [ 3., 4.],
  8517. [ 4., 5.],
  8518. [ 5., 6.],
  8519. [ 6., 7.]])
  8520. >>> x.unfold(0, 2, 2)
  8521. tensor([[ 1., 2.],
  8522. [ 3., 4.],
  8523. [ 5., 6.]])
  8524. """
  8525. ...
  8526. def uniform_(self, from_: _float = 0, to: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
  8527. r"""
  8528. uniform_(from=0, to=1, *, generator=None) -> Tensor
  8529. Fills :attr:`self` tensor with numbers sampled from the continuous uniform
  8530. distribution:
  8531. .. math::
  8532. f(x) = \dfrac{1}{\text{to} - \text{from}}
  8533. """
  8534. ...
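# Sketch: in-place fill with samples from U(from, to); values depend on the RNG
# state, so only the range is checked here:
#   >>> t = torch.empty(1000).uniform_(0, 10)
#   >>> bool(((t >= 0) & (t < 10)).all())
#   True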
  8535. def unsafe_chunk(self, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
  8536. r"""
  8537. unsafe_chunk(chunks, dim=0) -> List of Tensors
  8538. See :func:`torch.unsafe_chunk`
  8539. """
  8540. ...
  8541. def unsafe_split(self, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
  8542. r"""
  8543. unsafe_split(split_size, dim=0) -> List of Tensors
  8544. See :func:`torch.unsafe_split`
  8545. """
  8546. ...
  8547. def unsafe_split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
  8548. def unsqueeze(self, dim: _int) -> Tensor:
  8549. r"""
  8550. unsqueeze(dim) -> Tensor
  8551. See :func:`torch.unsqueeze`
  8552. """
  8553. ...
  8554. def unsqueeze_(self, dim: _int) -> Tensor:
  8555. r"""
  8556. unsqueeze_(dim) -> Tensor
  8557. In-place version of :meth:`~Tensor.unsqueeze`
  8558. """
  8559. ...
  8560. def values(self) -> Tensor:
  8561. r"""
  8562. values() -> Tensor
  8563. Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
  8564. .. warning::
  8565. Throws an error if :attr:`self` is not a sparse COO tensor.
  8566. See also :meth:`Tensor.indices`.
  8567. .. note::
  8568. This method can only be called on a coalesced sparse tensor. See
  8569. :meth:`Tensor.coalesce` for details.
  8570. """
  8571. ...
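# A minimal sketch of the coalescing requirement noted above (duplicates at
# the same index are summed by coalesce()):
#   >>> i = torch.tensor([[0, 0]])
#   >>> s = torch.sparse_coo_tensor(i, torch.tensor([1., 2.]), (3,))
#   >>> s.coalesce().values()
#   tensor([3.])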
  8572. @overload
  8573. def var(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
  8574. r"""
  8575. var(dim=None, *, correction=1, keepdim=False) -> Tensor
  8576. See :func:`torch.var`
  8577. """
  8578. ...
  8579. @overload
  8580. def var(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
  8581. r"""
  8582. var(dim=None, *, correction=1, keepdim=False) -> Tensor
  8583. See :func:`torch.var`
  8584. """
  8585. ...
  8586. @overload
  8587. def var(self, unbiased: _bool = True) -> Tensor:
  8588. r"""
  8589. var(dim=None, *, correction=1, keepdim=False) -> Tensor
  8590. See :func:`torch.var`
  8591. """
  8592. ...
  8593. @overload
  8594. def var(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
  8595. r"""
  8596. var(dim=None, *, correction=1, keepdim=False) -> Tensor
  8597. See :func:`torch.var`
  8598. """
  8599. ...
  8600. @overload
  8601. def var(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
  8602. r"""
  8603. var(dim=None, *, correction=1, keepdim=False) -> Tensor
  8604. See :func:`torch.var`
  8605. """
  8606. ...
  8607. def vdot(self, other: Tensor) -> Tensor:
  8608. r"""
  8609. vdot(other) -> Tensor
  8610. See :func:`torch.vdot`
  8611. """
  8612. ...
  8613. @overload
  8614. def view(self, dtype: _dtype) -> Tensor:
  8615. r"""
  8616. view(*shape) -> Tensor
  8617. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  8618. different :attr:`shape`.
  8619. The returned tensor shares the same data and must have the same number
  8620. of elements, but may have a different size. For a tensor to be viewed, the new
  8621. view size must be compatible with its original size and stride, i.e., each new
  8622. view dimension must either be a subspace of an original dimension, or only span
  8623. across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
  8624. contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
  8625. .. math::
  8626. \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
  8627. Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
  8628. without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
  8629. :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
  8630. returns a view if the shapes are compatible, and copies (equivalent to calling
  8631. :meth:`contiguous`) otherwise.
  8632. Args:
  8633. shape (torch.Size or int...): the desired size
  8634. Example::
  8635. >>> x = torch.randn(4, 4)
  8636. >>> x.size()
  8637. torch.Size([4, 4])
  8638. >>> y = x.view(16)
  8639. >>> y.size()
  8640. torch.Size([16])
  8641. >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
  8642. >>> z.size()
  8643. torch.Size([2, 8])
  8644. >>> a = torch.randn(1, 2, 3, 4)
  8645. >>> a.size()
  8646. torch.Size([1, 2, 3, 4])
  8647. >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
  8648. >>> b.size()
  8649. torch.Size([1, 3, 2, 4])
  8650. >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
  8651. >>> c.size()
  8652. torch.Size([1, 3, 2, 4])
  8653. >>> torch.equal(b, c)
  8654. False
  8655. .. method:: view(dtype) -> Tensor
  8656. :noindex:
  8657. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  8658. different :attr:`dtype`.
8659. If the element size of :attr:`dtype` differs from that of ``self.dtype``,
  8660. then the size of the last dimension of the output will be scaled
  8661. proportionally. For instance, if :attr:`dtype` element size is twice that of
  8662. ``self.dtype``, then each pair of elements in the last dimension of
  8663. :attr:`self` will be combined, and the size of the last dimension of the output
  8664. will be half that of :attr:`self`. If :attr:`dtype` element size is half that
  8665. of ``self.dtype``, then each element in the last dimension of :attr:`self` will
  8666. be split in two, and the size of the last dimension of the output will be
  8667. double that of :attr:`self`. For this to be possible, the following conditions
  8668. must be true:
  8669. * ``self.dim()`` must be greater than 0.
  8670. * ``self.stride(-1)`` must be 1.
  8671. Additionally, if the element size of :attr:`dtype` is greater than that of
  8672. ``self.dtype``, the following conditions must be true as well:
  8673. * ``self.size(-1)`` must be divisible by the ratio between the element
  8674. sizes of the dtypes.
  8675. * ``self.storage_offset()`` must be divisible by the ratio between the
  8676. element sizes of the dtypes.
  8677. * The strides of all dimensions, except the last dimension, must be
  8678. divisible by the ratio between the element sizes of the dtypes.
  8679. If any of the above conditions are not met, an error is thrown.
  8680. .. warning::
8681. This overload is not supported by TorchScript, and using it in a TorchScript
  8682. program will cause undefined behavior.
  8683. Args:
  8684. dtype (:class:`torch.dtype`): the desired dtype
  8685. Example::
  8686. >>> x = torch.randn(4, 4)
  8687. >>> x
  8688. tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
  8689. [-0.1520, 0.7472, 0.5617, -0.8649],
  8690. [-2.4724, -0.0334, -0.2976, -0.8499],
  8691. [-0.2109, 1.9913, -0.9607, -0.6123]])
  8692. >>> x.dtype
  8693. torch.float32
  8694. >>> y = x.view(torch.int32)
  8695. >>> y
  8696. tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
  8697. [-1105482831, 1061112040, 1057999968, -1084397505],
  8698. [-1071760287, -1123489973, -1097310419, -1084649136],
  8699. [-1101533110, 1073668768, -1082790149, -1088634448]],
  8700. dtype=torch.int32)
  8701. >>> y[0, 0] = 1000000000
  8702. >>> x
  8703. tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
  8704. [-0.1520, 0.7472, 0.5617, -0.8649],
  8705. [-2.4724, -0.0334, -0.2976, -0.8499],
  8706. [-0.2109, 1.9913, -0.9607, -0.6123]])
  8707. >>> x.view(torch.cfloat)
  8708. tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
  8709. [-0.1520+0.7472j, 0.5617-0.8649j],
  8710. [-2.4724-0.0334j, -0.2976-0.8499j],
  8711. [-0.2109+1.9913j, -0.9607-0.6123j]])
  8712. >>> x.view(torch.cfloat).size()
  8713. torch.Size([4, 2])
  8714. >>> x.view(torch.uint8)
  8715. tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
  8716. 8, 191],
  8717. [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
  8718. 93, 191],
  8719. [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
  8720. 89, 191],
  8721. [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
  8722. 28, 191]], dtype=torch.uint8)
  8723. >>> x.view(torch.uint8).size()
  8724. torch.Size([4, 16])
  8725. """
  8726. ...
  8727. @overload
  8728. def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor:
  8729. r"""
  8730. view(*shape) -> Tensor
  8731. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  8732. different :attr:`shape`.
  8733. The returned tensor shares the same data and must have the same number
  8734. of elements, but may have a different size. For a tensor to be viewed, the new
  8735. view size must be compatible with its original size and stride, i.e., each new
  8736. view dimension must either be a subspace of an original dimension, or only span
  8737. across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
  8738. contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
  8739. .. math::
  8740. \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
  8741. Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
  8742. without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
  8743. :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
  8744. returns a view if the shapes are compatible, and copies (equivalent to calling
  8745. :meth:`contiguous`) otherwise.
  8746. Args:
  8747. shape (torch.Size or int...): the desired size
  8748. Example::
  8749. >>> x = torch.randn(4, 4)
  8750. >>> x.size()
  8751. torch.Size([4, 4])
  8752. >>> y = x.view(16)
  8753. >>> y.size()
  8754. torch.Size([16])
  8755. >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
  8756. >>> z.size()
  8757. torch.Size([2, 8])
  8758. >>> a = torch.randn(1, 2, 3, 4)
  8759. >>> a.size()
  8760. torch.Size([1, 2, 3, 4])
  8761. >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
  8762. >>> b.size()
  8763. torch.Size([1, 3, 2, 4])
  8764. >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
  8765. >>> c.size()
  8766. torch.Size([1, 3, 2, 4])
  8767. >>> torch.equal(b, c)
  8768. False
  8769. .. method:: view(dtype) -> Tensor
  8770. :noindex:
  8771. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  8772. different :attr:`dtype`.
8773. If the element size of :attr:`dtype` differs from that of ``self.dtype``,
  8774. then the size of the last dimension of the output will be scaled
  8775. proportionally. For instance, if :attr:`dtype` element size is twice that of
  8776. ``self.dtype``, then each pair of elements in the last dimension of
  8777. :attr:`self` will be combined, and the size of the last dimension of the output
  8778. will be half that of :attr:`self`. If :attr:`dtype` element size is half that
  8779. of ``self.dtype``, then each element in the last dimension of :attr:`self` will
  8780. be split in two, and the size of the last dimension of the output will be
  8781. double that of :attr:`self`. For this to be possible, the following conditions
  8782. must be true:
  8783. * ``self.dim()`` must be greater than 0.
  8784. * ``self.stride(-1)`` must be 1.
  8785. Additionally, if the element size of :attr:`dtype` is greater than that of
  8786. ``self.dtype``, the following conditions must be true as well:
  8787. * ``self.size(-1)`` must be divisible by the ratio between the element
  8788. sizes of the dtypes.
  8789. * ``self.storage_offset()`` must be divisible by the ratio between the
  8790. element sizes of the dtypes.
  8791. * The strides of all dimensions, except the last dimension, must be
  8792. divisible by the ratio between the element sizes of the dtypes.
  8793. If any of the above conditions are not met, an error is thrown.
  8794. .. warning::
8795. This overload is not supported by TorchScript, and using it in a TorchScript
  8796. program will cause undefined behavior.
  8797. Args:
  8798. dtype (:class:`torch.dtype`): the desired dtype
  8799. Example::
  8800. >>> x = torch.randn(4, 4)
  8801. >>> x
  8802. tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
  8803. [-0.1520, 0.7472, 0.5617, -0.8649],
  8804. [-2.4724, -0.0334, -0.2976, -0.8499],
  8805. [-0.2109, 1.9913, -0.9607, -0.6123]])
  8806. >>> x.dtype
  8807. torch.float32
  8808. >>> y = x.view(torch.int32)
  8809. >>> y
  8810. tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
  8811. [-1105482831, 1061112040, 1057999968, -1084397505],
  8812. [-1071760287, -1123489973, -1097310419, -1084649136],
  8813. [-1101533110, 1073668768, -1082790149, -1088634448]],
  8814. dtype=torch.int32)
  8815. >>> y[0, 0] = 1000000000
  8816. >>> x
  8817. tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
  8818. [-0.1520, 0.7472, 0.5617, -0.8649],
  8819. [-2.4724, -0.0334, -0.2976, -0.8499],
  8820. [-0.2109, 1.9913, -0.9607, -0.6123]])
  8821. >>> x.view(torch.cfloat)
  8822. tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
  8823. [-0.1520+0.7472j, 0.5617-0.8649j],
  8824. [-2.4724-0.0334j, -0.2976-0.8499j],
  8825. [-0.2109+1.9913j, -0.9607-0.6123j]])
  8826. >>> x.view(torch.cfloat).size()
  8827. torch.Size([4, 2])
  8828. >>> x.view(torch.uint8)
  8829. tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
  8830. 8, 191],
  8831. [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
  8832. 93, 191],
  8833. [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
  8834. 89, 191],
  8835. [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
  8836. 28, 191]], dtype=torch.uint8)
  8837. >>> x.view(torch.uint8).size()
  8838. torch.Size([4, 16])
  8839. """
  8840. ...
  8841. @overload
  8842. def view(self, *size: _int) -> Tensor:
  8843. r"""
  8844. view(*shape) -> Tensor
  8845. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  8846. different :attr:`shape`.
  8847. The returned tensor shares the same data and must have the same number
  8848. of elements, but may have a different size. For a tensor to be viewed, the new
  8849. view size must be compatible with its original size and stride, i.e., each new
  8850. view dimension must either be a subspace of an original dimension, or only span
  8851. across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
  8852. contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
  8853. .. math::
  8854. \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
  8855. Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
  8856. without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
  8857. :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
  8858. returns a view if the shapes are compatible, and copies (equivalent to calling
  8859. :meth:`contiguous`) otherwise.
  8860. Args:
  8861. shape (torch.Size or int...): the desired size
  8862. Example::
  8863. >>> x = torch.randn(4, 4)
  8864. >>> x.size()
  8865. torch.Size([4, 4])
  8866. >>> y = x.view(16)
  8867. >>> y.size()
  8868. torch.Size([16])
  8869. >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
  8870. >>> z.size()
  8871. torch.Size([2, 8])
  8872. >>> a = torch.randn(1, 2, 3, 4)
  8873. >>> a.size()
  8874. torch.Size([1, 2, 3, 4])
  8875. >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
  8876. >>> b.size()
  8877. torch.Size([1, 3, 2, 4])
  8878. >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
  8879. >>> c.size()
  8880. torch.Size([1, 3, 2, 4])
  8881. >>> torch.equal(b, c)
  8882. False
  8883. .. method:: view(dtype) -> Tensor
  8884. :noindex:
  8885. Returns a new tensor with the same data as the :attr:`self` tensor but of a
  8886. different :attr:`dtype`.
8887. If the element size of :attr:`dtype` differs from that of ``self.dtype``,
  8888. then the size of the last dimension of the output will be scaled
  8889. proportionally. For instance, if :attr:`dtype` element size is twice that of
  8890. ``self.dtype``, then each pair of elements in the last dimension of
  8891. :attr:`self` will be combined, and the size of the last dimension of the output
  8892. will be half that of :attr:`self`. If :attr:`dtype` element size is half that
  8893. of ``self.dtype``, then each element in the last dimension of :attr:`self` will
  8894. be split in two, and the size of the last dimension of the output will be
  8895. double that of :attr:`self`. For this to be possible, the following conditions
  8896. must be true:
  8897. * ``self.dim()`` must be greater than 0.
  8898. * ``self.stride(-1)`` must be 1.
  8899. Additionally, if the element size of :attr:`dtype` is greater than that of
  8900. ``self.dtype``, the following conditions must be true as well:
  8901. * ``self.size(-1)`` must be divisible by the ratio between the element
  8902. sizes of the dtypes.
  8903. * ``self.storage_offset()`` must be divisible by the ratio between the
  8904. element sizes of the dtypes.
  8905. * The strides of all dimensions, except the last dimension, must be
  8906. divisible by the ratio between the element sizes of the dtypes.
  8907. If any of the above conditions are not met, an error is thrown.
  8908. .. warning::
8909. This overload is not supported by TorchScript, and using it in a TorchScript
  8910. program will cause undefined behavior.
  8911. Args:
  8912. dtype (:class:`torch.dtype`): the desired dtype
  8913. Example::
  8914. >>> x = torch.randn(4, 4)
  8915. >>> x
  8916. tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
  8917. [-0.1520, 0.7472, 0.5617, -0.8649],
  8918. [-2.4724, -0.0334, -0.2976, -0.8499],
  8919. [-0.2109, 1.9913, -0.9607, -0.6123]])
  8920. >>> x.dtype
  8921. torch.float32
  8922. >>> y = x.view(torch.int32)
  8923. >>> y
  8924. tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
  8925. [-1105482831, 1061112040, 1057999968, -1084397505],
  8926. [-1071760287, -1123489973, -1097310419, -1084649136],
  8927. [-1101533110, 1073668768, -1082790149, -1088634448]],
  8928. dtype=torch.int32)
  8929. >>> y[0, 0] = 1000000000
  8930. >>> x
  8931. tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
  8932. [-0.1520, 0.7472, 0.5617, -0.8649],
  8933. [-2.4724, -0.0334, -0.2976, -0.8499],
  8934. [-0.2109, 1.9913, -0.9607, -0.6123]])
  8935. >>> x.view(torch.cfloat)
  8936. tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
  8937. [-0.1520+0.7472j, 0.5617-0.8649j],
  8938. [-2.4724-0.0334j, -0.2976-0.8499j],
  8939. [-0.2109+1.9913j, -0.9607-0.6123j]])
  8940. >>> x.view(torch.cfloat).size()
  8941. torch.Size([4, 2])
  8942. >>> x.view(torch.uint8)
  8943. tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
  8944. 8, 191],
  8945. [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
  8946. 93, 191],
  8947. [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
  8948. 89, 191],
  8949. [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
  8950. 28, 191]], dtype=torch.uint8)
  8951. >>> x.view(torch.uint8).size()
  8952. torch.Size([4, 16])
  8953. """
  8954. ...
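# Editor's sketch (not part of the upstream stub): the contiguity condition in
# the view() docstring above in practice - a transposed tensor generally cannot
# be flattened with view(), while reshape() falls back to a copy.
# >>> a = torch.randn(2, 3)
# >>> b = a.t()            # non-contiguous: strides (1, 3)
# >>> b.reshape(6).size()  # reshape() copies when a view is impossible
# torch.Size([6])
# >>> # b.view(6) would raise a RuntimeError because b's size/stride are not
# >>> # compatible with a flat view.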
  8955. def view_as(self, other: Tensor) -> Tensor:
  8956. r"""
  8957. view_as(other) -> Tensor
  8958. View this tensor as the same size as :attr:`other`.
  8959. ``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
  8960. Please see :meth:`~Tensor.view` for more information about ``view``.
  8961. Args:
  8962. other (:class:`torch.Tensor`): The result tensor has the same size
  8963. as :attr:`other`.
  8964. """
  8965. ...
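# Editor's sketch (not part of the upstream stub): the equivalence stated in the
# view_as() docstring - self.view_as(other) is self.view(other.size()).
# >>> x = torch.randn(4, 4)
# >>> template = torch.empty(2, 8)
# >>> x.view_as(template).size()
# torch.Size([2, 8])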
  8966. @overload
  8967. def vsplit(self, sections: _int) -> Tuple[Tensor, ...]:
  8968. r"""
  8969. vsplit(split_size_or_sections) -> List of Tensors
  8970. See :func:`torch.vsplit`
  8971. """
  8972. ...
  8973. @overload
  8974. def vsplit(self, indices: _size) -> Tuple[Tensor, ...]:
  8975. r"""
  8976. vsplit(split_size_or_sections) -> List of Tensors
  8977. See :func:`torch.vsplit`
  8978. """
  8979. ...
  8980. @overload
  8981. def vsplit(self, *indices: _int) -> Tuple[Tensor, ...]:
  8982. r"""
  8983. vsplit(split_size_or_sections) -> List of Tensors
  8984. See :func:`torch.vsplit`
  8985. """
  8986. ...
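# Editor's sketch (not part of the upstream stub): the two vsplit calling
# conventions covered by the overloads above - a section count vs. explicit
# split indices along dim 0.
# >>> t = torch.arange(16.0).reshape(4, 4)
# >>> [p.size() for p in t.vsplit(2)]
# [torch.Size([2, 4]), torch.Size([2, 4])]
# >>> [p.size() for p in t.vsplit([1, 3])]
# [torch.Size([1, 4]), torch.Size([2, 4]), torch.Size([1, 4])]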
  8987. @overload
  8988. def where(self, condition: Tensor, other: Tensor) -> Tensor:
  8989. r"""
  8990. where(condition, y) -> Tensor
  8991. ``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
  8992. See :func:`torch.where`
  8993. """
  8994. ...
  8995. @overload
  8996. def where(self, condition: Tensor, other: Union[Number, _complex]) -> Tensor:
  8997. r"""
  8998. where(condition, y) -> Tensor
  8999. ``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
  9000. See :func:`torch.where`
  9001. """
  9002. ...
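# Editor's sketch (not part of the upstream stub): the equivalence noted in the
# where() docstring - self.where(condition, y) is torch.where(condition, self, y).
# >>> x = torch.tensor([-1.0, 0.5, -2.0, 3.0])
# >>> x.where(x > 0, torch.zeros_like(x))
# tensor([0.0000, 0.5000, 0.0000, 3.0000])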
  9003. @overload
  9004. def xlogy(self, other: Tensor) -> Tensor:
  9005. r"""
  9006. xlogy(other) -> Tensor
  9007. See :func:`torch.xlogy`
  9008. """
  9009. ...
  9010. @overload
  9011. def xlogy(self, other: Union[Number, _complex]) -> Tensor:
  9012. r"""
  9013. xlogy(other) -> Tensor
  9014. See :func:`torch.xlogy`
  9015. """
  9016. ...
  9017. @overload
  9018. def xlogy_(self, other: Tensor) -> Tensor:
  9019. r"""
  9020. xlogy_(other) -> Tensor
  9021. In-place version of :meth:`~Tensor.xlogy`
  9022. """
  9023. ...
  9024. @overload
  9025. def xlogy_(self, other: Union[Number, _complex]) -> Tensor:
  9026. r"""
  9027. xlogy_(other) -> Tensor
  9028. In-place version of :meth:`~Tensor.xlogy`
  9029. """
  9030. ...
  9031. def xpu(self, device: Optional[Union[_device, _int, str]] = None, non_blocking: _bool = False, memory_format: torch.memory_format = torch.preserve_format) -> Tensor:
  9032. r"""
  9033. xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
  9034. Returns a copy of this object in XPU memory.
  9035. If this object is already in XPU memory and on the correct device,
  9036. then no copy is performed and the original object is returned.
  9037. Args:
  9038. device (:class:`torch.device`): The destination XPU device.
  9039. Defaults to the current XPU device.
  9040. non_blocking (bool): If ``True`` and the source is in pinned memory,
  9041. the copy will be asynchronous with respect to the host.
  9042. Otherwise, the argument has no effect. Default: ``False``.
  9043. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  9044. returned Tensor. Default: ``torch.preserve_format``.
  9045. """
  9046. ...
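# Editor's sketch (not part of the upstream stub, and it assumes the torch.xpu
# frontend that accompanies these bindings is built): copying a tensor to an
# Intel GPU with the xpu() method documented above.
# >>> if torch.xpu.is_available():
# ...     t_xpu = torch.randn(4).xpu()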
  9047. def zero_(self) -> Tensor:
  9048. r"""
  9049. zero_() -> Tensor
  9050. Fills :attr:`self` tensor with zeros.
  9051. """
  9052. ...
  9053. _TensorBase = TensorBase
  9054. # Defined in torch/csrc/multiprocessing/init.cpp
  9055. def _multiprocessing_init() -> None: ...
  9056. # Defined in torch/csrc/Module.cpp
  9057. def _accelerator_hooks_device_count() -> _int: ...
  9058. def _accelerator_hooks_set_current_device(device_index: _int) -> None: ...
  9059. def _accelerator_hooks_get_current_device() -> _int: ...
  9060. def _accelerator_hooks_exchange_device(device_index: _int) -> _int: ...
  9061. def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int: ...
  9062. def _get_accelerator(check: _bool = False) -> _device: ...
  9063. # Defined in torch/csrc/mtia/Module.cpp
  9064. def _mtia_init() -> None: ...
  9065. def _mtia_isBuilt() -> _bool: ...
  9066. def _mtia_isInBadFork() -> _bool: ...
  9067. def _mtia_deviceSynchronize() -> None: ...
  9068. def _mtia_getCurrentStream(device: _int) -> Stream: ...
  9069. def _mtia_setCurrentStream(stream: Stream) -> None: ...
  9070. def _mtia_getDefaultStream(device: _int) -> Stream: ...
  9071. # Defined in torch/csrc/mps/Module.cpp
  9072. def _mps_deviceSynchronize() -> None: ...
  9073. def _mps_get_default_generator() -> Generator: ...
  9074. def _mps_emptyCache() -> None: ...
  9075. def _mps_setMemoryFraction(fraction: _float) -> None: ...
  9076. def _mps_currentAllocatedMemory() -> _int: ...
  9077. def _mps_driverAllocatedMemory() -> _int: ...
  9078. def _mps_is_available() -> _bool: ...
  9079. def _mps_is_on_macos_or_newer(major: _int, minor: _int) -> _bool: ...
  9080. def _mps_profilerStartTrace(mode: str, wait_until_completed: _bool) -> None: ...
  9081. def _mps_profilerStopTrace() -> None: ...
  9082. def _mps_acquireEvent(enable_timing: _bool) -> _int: ...
  9083. def _mps_releaseEvent(event_id: _int) -> None: ...
  9084. def _mps_recordEvent(event_id: _int) -> None: ...
  9085. def _mps_waitForEvent(event_id: _int) -> None: ...
  9086. def _mps_synchronizeEvent(event_id: _int) -> None: ...
  9087. def _mps_queryEvent(event_id: _int) -> _bool: ...
  9088. def _mps_elapsedTimeOfEvents(start_event_id: _int, end_event_id: _int) -> _float: ...
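# Editor's sketch (not part of the upstream stub): the _mps_* hooks above back
# the public "mps" device and torch.backends.mps surface (macOS only).
# >>> if torch.backends.mps.is_available():
# ...     x = torch.randn(4, device="mps")
# ...     torch.mps.synchronize()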
  9089. # Defined in torch/csrc/cuda/Module.cpp
  9090. def _cuda_getCurrentStream(device: _int) -> Tuple: ...
  9091. def _cuda_getCurrentRawStream(device: _int) -> _int: ...
  9092. def _cuda_getDefaultStream(device: _int) -> Tuple: ...
  9093. def _cuda_getCurrentBlasHandle() -> _int: ...
  9094. def _cuda_clearCublasWorkspaces() -> None: ...
  9095. def _cuda_setDevice(device: _int) -> None: ...
  9096. def _cuda_exchangeDevice(device: _int) -> _int: ...
  9097. def _cuda_maybeExchangeDevice(device: _int) -> _int: ...
  9098. def _cuda_getDevice() -> _int: ...
  9099. def _cuda_getDeviceCount() -> _int: ...
  9100. def _cuda_set_sync_debug_mode(warn_level: Union[_int, str]) -> None: ...
  9101. def _cuda_get_sync_debug_mode() -> _int: ...
  9102. def _cuda_sleep(cycles: _int) -> None: ...
  9103. def _cuda_synchronize() -> None: ...
  9104. def _cuda_ipc_collect() -> None: ...
  9105. def _cuda_getArchFlags() -> Optional[str]: ...
  9106. def _cuda_init() -> None: ...
  9107. def _cuda_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ...
  9108. def _cuda_getCompiledVersion() -> _int: ...
  9109. def _cuda_cudaHostAllocator() -> _int: ...
  9110. def _cuda_cudaCachingAllocator_raw_alloc(size: _int, cuda_stream: _int) -> _int: ...
  9111. def _cuda_cudaCachingAllocator_raw_delete(ptr: _int) -> None: ...
  9112. def _cuda_cudaCachingAllocator_set_allocator_settings(env: str) -> None: ...
  9113. def _cuda_beginAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ...
  9114. def _cuda_endAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ...
  9115. def _cuda_releasePool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ...
  9116. def _cuda_checkPoolLiveAllocations(device: _int, mempool_id: Tuple[_int, _int], expected_live_allocations: Set) -> _bool: ...
  9117. def _cuda_setCheckpointPoolState(device: _int, state: _cuda_CUDAAllocator_AllocatorState, stale_storages: List[_int], storages_to_add_deleters_to: List[_int]) -> None: ...
  9118. def _cuda_setMemoryFraction(fraction: _float, device: _int) -> None: ...
  9119. def _cuda_emptyCache() -> None: ...
  9120. def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ...
  9121. def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ...
  9122. def _cuda_resetPeakMemoryStats(device: _int) -> None: ...
  9123. def _cuda_memorySnapshot() -> Dict[str, Any]: ...
  9124. def _cuda_record_memory_history_legacy(
  9125. enabled: _bool,
  9126. record_context: _bool,
  9127. record_context_cpp: _bool,
  9128. alloc_trace_max_entries: _int,
  9129. alloc_trace_record_context: _bool,
  9130. ) -> None: ...
  9131. def _cuda_record_memory_history(
  9132. enabled: Optional[str],
  9133. context: Optional[str],
  9134. stacks: str,
9135. max_entries: _int,
  9136. ) -> None: ...
  9137. def _cuda_isHistoryEnabled() -> _bool: ...
  9138. def _cuda_getAllocatorBackend() -> str: ...
  9139. class _cuda_CUDAAllocator_AllocatorState:
  9140. pass
  9141. def _cuda_getCheckpointState(device: _int, mempool: Tuple[_int, _int]) -> _cuda_CUDAAllocator_AllocatorState: ...
  9142. def _set_cached_tensors_enabled(enabled: _bool) -> None: ...
  9143. def _add_cached_tensor(t: Tensor) -> None: ...
  9144. def _remove_cached_tensor(t: Tensor) -> None: ...
  9145. def _tensors_data_ptrs_at_indices_equal(tensors: List[Tensor], ptrs: List[Optional[_int]], indices: List[_int]) -> _bool: ...
  9146. def _construct_CUDA_Tensor_From_Storage_And_Metadata(metadata: dict, storage: Storage) -> Tensor: ...
  9147. def _storage_Use_Count(storage_ptr: _int) -> _int: ...
  9148. def _set_storage_access_error_msg(t: Tensor, s: str) -> None: ...
  9149. def _free_And_Remove_DeleterFn(storage_ptr: _int) -> None: ...
  9150. def _has_Standard_Deleter(storage_ptr: _int) -> _bool: ...
  9151. class _cuda_CUDAAllocator: ...
  9152. def _cuda_customAllocator(alloc_fn: _int, free_fn: _int) -> _cuda_CUDAAllocator: ...
  9153. def _cuda_changeCurrentAllocator(allocator: _cuda_CUDAAllocator) -> None: ...
  9154. def _cuda_getAllocator() -> _cuda_CUDAAllocator: ...
  9155. def _cuda_lock_mutex() -> None: ...
  9156. def _cuda_unlock_mutex() -> None: ...
  9157. def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ...
  9158. def _cuda_jiterator_compile_and_launch_kernel(
  9159. code_string: str,
  9160. kernel_name: str,
  9161. return_by_ref: _bool,
  9162. num_outputs: _int,
  9163. tensors: Tuple,
  9164. kwargs: Dict[str, Union[_int, _float, _bool]],
  9165. ) -> Tensor: ...
  9166. def _cuda_get_cudnn_benchmark_limit() -> _int: ...
  9167. def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ...
  9168. def _cuda_get_conv_benchmark_empty_cache() -> _bool: ...
  9169. def _cudnn_set_conv_benchmark_empty_cache(enable: _bool) -> None: ...
  9170. def _nccl_version() -> _int: ...
9171. def _nccl_version_suffix() -> bytes: ...
  9172. def _nccl_unique_id() -> bytes: ...
  9173. def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ...
  9174. def _nccl_reduce(
  9175. input: Sequence[Tensor],
  9176. output: Tensor,
  9177. root: _int,
  9178. op: _int,
  9179. streams: Optional[Sequence[_CudaStreamBase]],
  9180. comms: Optional[Sequence[object]],
  9181. ) -> None: ...
  9182. def _nccl_all_reduce(
  9183. input: Sequence[Tensor],
  9184. output: Sequence[Tensor],
  9185. op: _int,
  9186. streams: Optional[Sequence[_CudaStreamBase]],
  9187. comms: Optional[Sequence[object]],
  9188. ) -> None: ...
  9189. def _nccl_broadcast(
  9190. input: Sequence[Tensor],
  9191. root: _int,
  9192. streams: Optional[Sequence[_CudaStreamBase]],
  9193. comms: Optional[Sequence[object]],
  9194. ) -> None: ...
  9195. def _nccl_all_gather(
  9196. input: Sequence[Tensor],
  9197. output: Sequence[Tensor],
  9198. streams: Optional[Sequence[_CudaStreamBase]],
  9199. comms: Optional[Sequence[object]],
  9200. ) -> None: ...
  9201. def _nccl_reduce_scatter(
  9202. input: Sequence[Tensor],
  9203. output: Sequence[Tensor],
  9204. op: _int,
  9205. streams: Optional[Sequence[_CudaStreamBase]],
  9206. comms: Optional[Sequence[object]],
  9207. ) -> None: ...
  9208. def _rocm_is_backward_pass() -> _bool: ...
  9209. def _cuda_tunableop_enable(val: _bool) -> None: ...
  9210. def _cuda_tunableop_is_enabled() -> _bool: ...
  9211. def _cuda_tunableop_tuning_enable(val: _bool) -> None: ...
  9212. def _cuda_tunableop_tuning_is_enabled() -> _bool: ...
  9213. def _cuda_tunableop_set_max_tuning_duration(duration: _int) -> None: ...
  9214. def _cuda_tunableop_get_max_tuning_duration() -> _int: ...
  9215. def _cuda_tunableop_set_max_tuning_iterations(iterations: _int) -> None: ...
  9216. def _cuda_tunableop_get_max_tuning_iterations() -> _int: ...
  9217. def _cuda_tunableop_set_filename(filename: str, insert_device_ordinal: Optional[_bool]) -> None: ...
  9218. def _cuda_tunableop_get_filename() -> str: ...
  9219. def _cuda_tunableop_write_file(filename: Optional[str]) -> _bool: ...
  9220. def _cuda_tunableop_read_file(filename: Optional[str]) -> _bool: ...
  9221. def _cuda_tunableop_write_file_on_exit(val: _bool) -> None: ...
  9222. def _cuda_tunableop_get_results() -> Tuple[str, str, str, _float]: ...
  9223. def _cuda_tunableop_get_validators() -> Tuple[str, str]: ...
  9224. class _CudaDeviceProperties:
  9225. name: str
  9226. major: _int
  9227. minor: _int
  9228. multi_processor_count: _int
  9229. total_memory: _int
  9230. is_integrated: _int
  9231. is_multi_gpu_board: _int
  9232. max_threads_per_multi_processor: _int
  9233. gcnArchName: str
  9234. # Functions related to SDPA
  9235. class _SDPAParams:
  9236. query: Tensor
  9237. key: Tensor
  9238. value: Tensor
  9239. attn_mask: Optional[Tensor]
  9240. dropout: _float
  9241. is_causal: _bool
  9242. def __init__(
  9243. self,
  9244. query: Tensor,
  9245. key: Tensor,
  9246. value: Tensor,
  9247. attn_mask: Optional[Tensor],
  9248. dropout: _float,
  9249. is_causal: _bool) -> None: ...
  9250. class _SDPBackend(Enum):
  9251. ERROR = -1
  9252. MATH = 0
  9253. FLASH_ATTENTION = 1
  9254. EFFICIENT_ATTENTION = 2
  9255. CUDNN_ATTENTION = 3
  9256. def _can_use_flash_attention(params: _SDPAParams, debug: _bool) -> _bool: ...
  9257. def _can_use_mem_efficient_attention(params: _SDPAParams, debug: _bool) -> _bool: ...
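# Editor's sketch (not part of the upstream stub): _SDPAParams/_SDPBackend back
# the backend dispatch for the public scaled_dot_product_attention API; minimal
# usage of that public entry point.
# >>> import torch.nn.functional as F
# >>> q = torch.randn(2, 8, 16, 64)  # (batch, heads, seq, head_dim)
# >>> k = torch.randn(2, 8, 16, 64)
# >>> v = torch.randn(2, 8, 16, 64)
# >>> F.scaled_dot_product_attention(q, k, v, is_causal=True).shape
# torch.Size([2, 8, 16, 64])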
  9258. # Defined in torch/csrc/cuda/python_comm.cpp
  9259. def _broadcast(tensor: Tensor, devices: List[_int]) -> List[Tensor]: ...
  9260. def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: ...
  9261. def _broadcast_coalesced(
  9262. tensors: List[Tensor],
  9263. devices: List[_int],
  9264. buffer_size: _int,
  9265. ) -> List[List[Tensor]]: ...
  9266. def _scatter(
  9267. tensor: Tensor,
  9268. devices: List[_int],
  9269. chunk_sizes: Optional[List[_int]],
  9270. dim: _int,
  9271. streams: Optional[List[Stream]],
  9272. ) -> List[Tensor]: ...
  9273. def _scatter_out(
  9274. tensor: Tensor,
  9275. out_tensors: List[Tensor],
  9276. dim: _int,
  9277. streams: Optional[List[Stream]],
  9278. ) -> List[Tensor]: ...
  9279. def _gather(
  9280. tensors: List[Tensor],
  9281. dim: _int,
  9282. destination_index: Optional[_int],
  9283. ) -> Tensor: ...
  9284. def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ...
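# Editor's sketch (not part of the upstream stub): these bindings back the
# torch.cuda.comm helpers; minimal broadcast across two CUDA devices (requires
# at least two GPUs).
# >>> import torch.cuda.comm as comm
# >>> t = torch.randn(4, 4, device="cuda:0")
# >>> copies = comm.broadcast(t, devices=[0, 1])
# >>> [c.device for c in copies]
# [device(type='cuda', index=0), device(type='cuda', index=1)]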
  9285. # Defined in torch/csrc/cuda/Stream.cpp
  9286. class _CudaStreamBase(Stream):
  9287. stream_id: _int
  9288. device_index: _int
  9289. device_type: _int
  9290. device: _device
  9291. cuda_stream: _int
  9292. priority: _int
  9293. def __new__(
9294. cls,
  9295. priority: _int = 0,
  9296. stream_id: _int = 0,
  9297. device_index: _int = 0,
  9298. stream_ptr: _int = 0,
  9299. ) -> _CudaStreamBase: ...
  9300. def query(self) -> _bool: ...
  9301. def synchronize(self) -> None: ...
  9302. def priority_range(self) -> Tuple[_int, _int]: ...
  9303. # Defined in torch/csrc/cuda/Event.cpp
  9304. class _CudaEventBase:
  9305. device: _device
  9306. cuda_event: _int
  9307. def __new__(
  9308. cls,
  9309. enable_timing: _bool = False,
  9310. blocking: _bool = False,
  9311. interprocess: _bool = False,
  9312. ) -> _CudaEventBase: ...
  9313. @classmethod
  9314. def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ...
  9315. def record(self, stream: _CudaStreamBase) -> None: ...
  9316. def wait(self, stream: _CudaStreamBase) -> None: ...
  9317. def query(self) -> _bool: ...
  9318. def elapsed_time(self, other: _CudaEventBase) -> _float: ...
  9319. def synchronize(self) -> None: ...
  9320. def ipc_handle(self) -> bytes: ...
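# Editor's sketch (not part of the upstream stub): _CudaStreamBase/_CudaEventBase
# are wrapped by the public torch.cuda.Stream and torch.cuda.Event; typical
# event-based timing on a side stream (requires a CUDA device).
# >>> s = torch.cuda.Stream()
# >>> start = torch.cuda.Event(enable_timing=True)
# >>> end = torch.cuda.Event(enable_timing=True)
# >>> with torch.cuda.stream(s):
# ...     start.record()
# ...     y = torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
# ...     end.record()
# >>> end.synchronize()
# >>> elapsed_ms = start.elapsed_time(end)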
  9321. # Defined in torch/csrc/cuda/Graph.cpp
  9322. class _CUDAGraph:
  9323. def capture_begin(self, pool: Optional[Tuple[_int, _int]] = ..., capture_error_mode: str = "global") -> None: ...
  9324. def capture_end(self) -> None: ...
9325. def register_generator_state(self, generator: Generator) -> None: ...
  9326. def replay(self) -> None: ...
  9327. def reset(self) -> None: ...
  9328. def pool(self) -> Tuple[_int, _int]: ...
  9329. def enable_debug_mode(self) -> None: ...
  9330. def debug_dump(self, debug_path: str) -> None: ...
  9331. def _cuda_isCurrentStreamCapturing() -> _bool: ...
  9332. def _graph_pool_handle() -> Tuple[_int, _int]: ...
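# Editor's sketch (not part of the upstream stub): _CUDAGraph is exposed through
# torch.cuda.CUDAGraph / torch.cuda.graph; minimal capture-and-replay (real code
# usually warms up on a side stream first; requires a CUDA device).
# >>> static_in = torch.randn(8, 8, device="cuda")
# >>> g = torch.cuda.CUDAGraph()
# >>> with torch.cuda.graph(g):
# ...     static_out = static_in * 2
# >>> static_in.copy_(torch.ones(8, 8, device="cuda"))
# >>> g.replay()  # recomputes static_out from the updated static_in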
  9333. # Defined in torch/csrc/xpu/Module.cpp
  9334. def _xpu_setDevice(device: _int) -> None: ...
  9335. def _xpu_exchangeDevice(device: _int) -> _int: ...
  9336. def _xpu_maybeExchangeDevice(device: _int) -> _int: ...
  9337. def _xpu_getDevice() -> _int: ...
  9338. def _xpu_getDeviceCount() -> _int: ...
  9339. def _xpu_init() -> None: ...
  9340. def _xpu_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ...
  9341. def _xpu_getCurrentStream(device: _int) -> Tuple: ...
  9342. def _xpu_getCurrentRawStream(device: _int) -> _int: ...
  9343. def _xpu_synchronize(device: _int) -> None: ...
  9344. def _xpu_emptyCache() -> None: ...
  9345. class _XpuDeviceProperties:
  9346. name: str
  9347. platform_name: str
  9348. vendor: str
  9349. driver_version: str
  9350. version: str
  9351. total_memory: _int
  9352. max_compute_units: _int
  9353. gpu_eu_count: _int
  9354. gpu_subslice_count: _int
  9355. max_work_group_size: _int
  9356. max_num_sub_groups: _int
  9357. sub_group_sizes: List[_int]
  9358. has_fp16: _bool
  9359. has_fp64: _bool
  9360. has_atomic64: _bool
  9361. type: str
  9362. # Defined in torch/csrc/xpu/Stream.cpp
  9363. class _XpuStreamBase(Stream):
  9364. stream_id: _int
  9365. device_index: _int
  9366. device_type: _int
  9367. device: _device
  9368. sycl_queue: _int
  9369. priority: _int
  9370. def __new__(
  9371. cls,
  9372. priority: _int = 0,
  9373. stream_id: _int = 0,
  9374. device_index: _int = 0,
  9375. device_type: _int = 0,
  9376. ) -> _XpuStreamBase: ...
  9377. def query(self) -> _bool: ...
  9378. def synchronize(self) -> None: ...
  9379. @staticmethod
  9380. def priority_range() -> Tuple: ...
  9381. # Defined in torch/csrc/xpu/Event.cpp
  9382. class _XpuEventBase:
  9383. device: _device
  9384. sycl_event: _int
  9385. def __new__(cls, enable_timing: _bool = False) -> _XpuEventBase: ...
9386. def record(self, stream: _XpuStreamBase) -> None: ...
  9387. def wait(self, stream: _XpuStreamBase) -> None: ...
  9388. def query(self) -> _bool: ...
  9389. def elapsed_time(self, other: _XpuEventBase) -> _float: ...
  9390. def synchronize(self) -> None: ...
  9391. # Defined in torch/csrc/DataLoader.cpp
  9392. def _set_worker_signal_handlers(
  9393. *arg: Any,
  9394. ) -> None: ... # THPModule_setWorkerSignalHandlers
  9395. def _set_worker_pids(
  9396. key: _int,
  9397. child_pids: Tuple[_int, ...],
  9398. ) -> None: ... # THPModule_setWorkerPIDs
  9399. def _remove_worker_pids(loader_id: _int) -> None: ... # THPModule_removeWorkerPIDs
  9400. def _error_if_any_worker_fails() -> None: ... # THPModule_errorIfAnyWorkerFails
  9401. # Defined in torch/csrc/jit/python/python_tracer.cpp
  9402. class TracingState:
  9403. def push_scope(self, scope_name: str) -> None: ...
  9404. def pop_scope(self) -> None: ...
  9405. def current_scope(self) -> str: ...
  9406. def set_graph(self, graph: Graph) -> None: ...
  9407. def graph(self) -> Graph: ...
  9408. def _create_graph_by_tracing(
  9409. func: Callable[..., Any],
  9410. inputs: Any,
  9411. var_name_lookup_fn: Callable[[Tensor], str],
  9412. strict: Any,
  9413. force_outplace: Any,
  9414. self: Any = None,
  9415. argument_names: List[str] = [],
  9416. ) -> Tuple[Graph, Stack]: ...
  9417. def _tracer_warn_use_python(): ...
  9418. def _get_tracing_state() -> TracingState: ...
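# Editor's sketch (not part of the upstream stub): TracingState and
# _create_graph_by_tracing sit behind the public torch.jit.trace entry point.
# >>> def f(x):
# ...     return x.relu() + 1
# >>> traced = torch.jit.trace(f, torch.randn(3))
# >>> print(traced.graph)  # the captured Graph IR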
  9419. # Defined in torch/csrc/jit/python/python_ir.cpp
9420. # Not actually defined in python_ir.cpp; it is unclear where they are actually defined.
  9421. class IValue: ...
  9422. Stack = List[IValue]
  9423. class JitType:
  9424. annotation_str: str
  9425. def isSubtypeOf(self, other: JitType) -> _bool: ...
  9426. def with_dtype(self, dtype: _dtype) -> JitType: ...
  9427. def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ...
  9428. def kind(self) -> str: ...
  9429. def scalarType(self) -> Optional[str]: ...
  9430. def getElementType(self) -> JitType: ...
  9431. def dtype(self) -> Optional[_dtype]: ...
  9432. class InferredType:
  9433. def __init__(self, arg: Union[JitType, str]): ...
  9434. def type(self) -> JitType: ...
  9435. def success(self) -> _bool: ...
  9436. def reason(self) -> str: ...
  9437. R = TypeVar("R", bound=JitType)
  9438. class AnyType(JitType):
  9439. @staticmethod
  9440. def get() -> AnyType: ...
  9441. class NoneType(JitType):
  9442. @staticmethod
  9443. def get() -> NoneType: ...
  9444. class BoolType(JitType):
  9445. @staticmethod
  9446. def get() -> BoolType: ...
  9447. class FloatType(JitType):
  9448. @staticmethod
  9449. def get() -> FloatType: ...
  9450. class ComplexType(JitType):
  9451. @staticmethod
  9452. def get() -> ComplexType: ...
  9453. class IntType(JitType):
  9454. @staticmethod
  9455. def get() -> IntType: ...
  9456. class SymIntType(JitType):
  9457. @staticmethod
  9458. def get() -> SymIntType: ...
  9459. class SymBoolType(JitType):
  9460. @staticmethod
  9461. def get() -> SymBoolType: ...
  9462. class NumberType(JitType):
  9463. @staticmethod
  9464. def get() -> NumberType: ...
  9465. class StringType(JitType):
  9466. @staticmethod
  9467. def get() -> StringType: ...
  9468. class DeviceObjType(JitType):
  9469. @staticmethod
  9470. def get() -> DeviceObjType: ...
  9471. class _GeneratorType(JitType):
  9472. @staticmethod
  9473. def get() -> _GeneratorType: ...
  9474. class StreamObjType(JitType):
  9475. @staticmethod
  9476. def get() -> StreamObjType: ...
  9477. class ListType(JitType):
  9478. def __init__(self, a: JitType) -> None: ...
  9479. def getElementType(self) -> JitType: ...
  9480. @staticmethod
  9481. def ofInts() -> ListType: ...
  9482. @staticmethod
  9483. def ofTensors() -> ListType: ...
  9484. @staticmethod
  9485. def ofFloats() -> ListType: ...
  9486. @staticmethod
  9487. def ofComplexDoubles() -> ListType: ...
  9488. @staticmethod
  9489. def ofBools() -> ListType: ...
  9490. @staticmethod
  9491. def ofStrings() -> ListType: ...
  9492. class DictType(JitType):
  9493. def __init__(self, key: JitType, value: JitType) -> None: ...
  9494. def getKeyType(self) -> JitType: ...
  9495. def getValueType(self) -> JitType: ...
  9496. class TupleType(JitType):
  9497. def __init__(self, a: List[Optional[JitType]]) -> None: ...
  9498. def elements(self) -> List[JitType]: ...
  9499. class UnionType(JitType):
  9500. def __init__(self, a: List[JitType]) -> None: ...
  9501. class ClassType(JitType):
  9502. def __init__(self, qualified_name: str) -> None: ...
  9503. class InterfaceType(JitType):
  9504. def __init__(self, qualified_name: str) -> None: ...
  9505. def getMethod(self, name: str) -> Optional[FunctionSchema]: ...
  9506. def getMethodNames(self) -> List[str]: ...
  9507. class OptionalType(JitType, Generic[R]):
  9508. def __init__(self, a: JitType) -> None: ...
  9509. def getElementType(self) -> JitType: ...
  9510. @staticmethod
  9511. def ofTensor() -> OptionalType: ...
  9512. class FutureType(JitType):
  9513. def __init__(self, a: JitType) -> None: ...
  9514. def getElementType(self) -> JitType: ...
  9515. class AwaitType(JitType):
  9516. def __init__(self, a: JitType) -> None: ...
  9517. def getElementType(self) -> JitType: ...
  9518. class RRefType(JitType):
  9519. def __init__(self, a: JitType) -> None: ...
  9520. class EnumType(JitType):
  9521. def __init__(
  9522. self,
  9523. qualified_name: str,
  9524. value_type: JitType,
  9525. enum_names_values: List[Any],
  9526. ) -> None: ...
  9527. class TensorType(JitType):
  9528. @classmethod
  9529. def get(cls) -> TensorType: ...
  9530. @classmethod
  9531. def getInferred(cls) -> TensorType: ...
  9532. def with_sizes(self, other: Optional[List[Optional[_int]]]) -> TensorType: ...
  9533. def sizes(self) -> Optional[List[_int]]: ...
  9534. def varyingSizes(self) -> Optional[List[Optional[_int]]]: ...
  9535. def strides(self) -> Optional[List[_int]]: ...
  9536. def device(self) -> Optional[_device]: ...
  9537. def dim(self) -> _int: ...
  9538. def dtype(self) -> Optional[_dtype]: ...
  9539. @staticmethod
  9540. def create_from_tensor(t: Tensor) -> TensorType: ...
  9541. # Defined in torch/csrc/jit/python/python_tree_views.cpp
  9542. class SourceRange: ...
  9543. class TreeView: ...
  9544. class Ident(TreeView):
  9545. @property
  9546. def name(self) -> str: ...
  9547. class ClassDef(TreeView): ...
  9548. class Def(TreeView):
  9549. def name(self) -> Ident: ...
  9550. class Decl(TreeView): ...
  9551. # Defined in torch/csrc/distributed/rpc/init.cpp
  9552. def _rpc_init() -> _bool: ...
  9553. # Defined in torch/csrc/distributed/autograd/init.cpp
  9554. def _dist_autograd_init() -> _bool: ...
  9555. # Defined in torch/csrc/distributed/c10d/init.cpp
  9556. def _c10d_init() -> _bool: ...
  9557. # Defined in torch/csrc/distributed/rpc/testing/init.cpp
  9558. def _faulty_agent_init() -> _bool: ...
  9559. def _register_py_class_for_device(device: str, cls: Any) -> None: ...
  9560. # Defined in torch/csrc/Module.cpp
  9561. def _current_graph_task_id() -> _int: ...
  9562. def _current_autograd_node() -> _Node: ...
9563. def _dispatch_key_set(tensor: Tensor) -> str: ...
  9564. # Defined in torch/csrc/Exceptions.cpp
  9565. class OutOfMemoryError(RuntimeError): ...
  9566. class _DistError(RuntimeError): ...
  9567. class _DistBackendError(RuntimeError): ...
  9568. class _DistStoreError(RuntimeError): ...
  9569. class _DistNetworkError(RuntimeError): ...
  9570. # Defined in torch/csrc/profiler/init.cpp
  9571. class CapturedTraceback:
  9572. pass
  9573. def gather_traceback(python: _bool, script: _bool, cpp: _bool) -> CapturedTraceback: ...
  9574. def symbolize_tracebacks(tracebacks: List[CapturedTraceback]) -> List[Dict[str, Any]]: ...
  9575. def _load_mobile_module_from_file(filename: str): ...
  9576. def _load_mobile_module_from_bytes(bytes_: bytes): ...
  9577. def _load_jit_module_from_file(filename: str): ...
  9578. def _load_jit_module_from_bytes(bytes_: bytes): ...
  9579. def _save_mobile_module(m: LiteScriptModule, filename: str): ...
  9580. def _save_jit_module(m: ScriptModule, filename: str, extra_files: Dict[str, Any]): ...
  9581. def _save_mobile_module_to_bytes(m: LiteScriptModule) -> bytes: ...
  9582. def _save_jit_module_to_bytes(m: ScriptModule, extra_files: Dict[str, Any]) -> bytes: ...
  9583. def _get_module_info_from_flatbuffer(data: bytes): ...
  9584. def _jit_resolve_packet(op_name: str, *args, **kwargs) -> str: ...
  9585. def _swap_tensor_impl(t1: Tensor, t2: Tensor): ...
  9586. def _save_pickle(obj: Any) -> bytes: ...
  9587. # Defined in torch/csrc/jit/runtime/static/init.cpp
9588. def _jit_to_static_module(graph_or_module: Union[Graph, ScriptModule]) -> Any: ...
9589. def _fuse_to_static_module(graph_or_module: Union[Graph, ScriptModule], min_size: _int) -> Any: ...