| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
77727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763
27732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763
77737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764
27742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764
77747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765
27752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765
77757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766
27762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766
77767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767
27772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767
77777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181818281838184818581868187818881898190819181928193819481958196819781988199820082018202820382048205820682078208820982108211821282138214821582168217821882198220822182228223822482258226822782288229823082318232823382348235823682378238823982408241824282438244824582468247824882498250825182528253825482558256825782588259826082618262826382648265826682678268826982708271827282738274827582768
27782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381838283838384838583868387838883898390839183928393839483958396839783988399840084018402840384048405840684078408840984108411841284138414841584168417841884198420842184228423842484258426842784288429843084318432843384348435843684378438843984408441844284438444844584468447844884498450845184528453845484558456845784588459846084618462846384648465846684678468846984708471847284738474847584768477847884798480848184828483848484858486848784888489849084918492849384948495849684978498849985008501850285038504850585068507850885098510851185128513851485158516851785188519852085218522852385248525852685278528852985308531853285338534853585368537853885398540854185428543854485458546854785488549855085518552855385548555855685578558855985608561856285638564856585668567856885698570857185728573857485758576857785788579858085818582858385848585858685878588858985908591859285938594859585968597859885998600860186028603860486058606860786088609861086118612861386148615861686178618861986208621862286238624862586268627862886298630863186328633863486358636863786388639864086418642864386448645864686478648864986508651865286538654865586568657865886598660866186628663866486658666866786688669867086718672867386748675867686778678867986808681868286838684868586868687868886898690869186928693869486958696869786988699870087018702870387048705870687078708870987108711871287138714871587168717871887198720872187228723872487258726872787288729873087318732873387348735873687378738873987408741874287438744874587468747874887498750875187528753875487558756875787588759876087618762876387648765876687678768876987708771877287738774877587768
77787788779878087818782878387848785878687878788878987908791879287938794879587968797879887998800880188028803880488058806880788088809881088118812881388148815881688178818881988208821882288238824882588268827882888298830883188328833883488358836883788388839884088418842884388448845884688478848884988508851885288538854885588568857885888598860886188628863886488658866886788688869887088718872887388748875887688778878887988808881888288838884888588868887888888898890889188928893889488958896889788988899890089018902890389048905890689078908890989108911891289138914891589168917891889198920892189228923892489258926892789288929893089318932893389348935893689378938893989408941894289438944894589468947894889498950895189528953895489558956895789588959896089618962896389648965896689678968896989708971897289738974897589768977897889798980898189828983898489858986898789888989899089918992899389948995899689978998899990009001900290039004900590069007900890099010901190129013901490159016901790189019902090219022902390249025902690279028902990309031903290339034903590369037903890399040904190429043904490459046904790489049905090519052905390549055905690579058905990609061906290639064906590669067906890699070907190729073907490759076907790789079908090819082908390849085908690879088908990909091909290939094909590969097909890999100910191029103910491059106910791089109911091119112911391149115911691179118911991209121912291239124912591269127912891299130913191329133913491359136913791389139914091419142914391449145914691479148914991509151915291539154915591569157915891599160916191629163916491659166916791689169917091719172917391749175917691779178917991809181918291839184918591869187918891899190919191929193919491959196919791989199920092019202920392049205920692079208920992109211921292139214921592169217921892199220922192229223922492259226922792289229923092319232923392349235923692379238923992409241924292439244924592469247924892499250925192529253925492559256925792589259926092619262926392649265926692679268926992709271927292739274927592769
27792789279928092819282928392849285928692879288928992909291929292939294929592969297929892999300930193029303930493059306930793089309931093119312931393149315931693179318931993209321932293239324932593269327932893299330933193329333933493359336933793389339934093419342934393449345934693479348934993509351935293539354935593569357935893599360936193629363936493659366936793689369937093719372937393749375937693779378937993809381938293839384938593869387938893899390939193929393939493959396939793989399940094019402940394049405940694079408940994109411941294139414941594169417941894199420942194229423942494259426942794289429943094319432943394349435943694379438943994409441944294439444944594469447944894499450945194529453945494559456945794589459946094619462946394649465946694679468946994709471947294739474947594769477947894799480948194829483948494859486948794889489949094919492949394949495949694979498949995009501950295039504950595069507950895099510951195129513951495159516951795189519952095219522952395249525952695279528952995309531953295339534953595369537953895399540954195429543954495459546954795489549955095519552955395549555955695579558955995609561956295639564956595669567956895699570957195729573957495759576957795789579958095819582958395849585958695879588958995909591959295939594959595969597959895999600960196029603960496059606960796089609961096119612961396149615961696179618961996209621962296239624962596269627962896299630963196329633963496359636963796389639964096419642964396449645964696479648964996509651965296539654965596569657965896599660966196629663966496659666966796689669967096719672967396749675967696779678967996809681968296839684968596869687968896899690969196929693969496959696969796989699970097019702970397049705970697079708970997109711971297139714971597169717971897199720972197229723972497259726972797289729973097319732973397349735973697379738973997409741974297439744974597469747974897499750975197529753975497559756975797589759976097619762976397649765976697679768976997709771977297739774977597769
77797789779978097819782978397849785978697879788978997909791979297939794979597969797979897999800980198029803980498059806980798089809981098119812981398149815981698179818981998209821982298239824982598269827982898299830983198329833983498359836983798389839984098419842984398449845984698479848984998509851985298539854985598569857985898599860986198629863986498659866986798689869987098719872987398749875987698779878987998809881988298839884988598869887988898899890989198929893989498959896989798989899990099019902990399049905990699079908990999109911991299139914991599169917991899199920992199229923992499259926992799289929993099319932993399349935993699379938993999409941994299439944994599469947994899499950995199529953995499559956995799589959996099619962996399649965996699679968996999709971997299739974997599769977997899799980998199829983998499859986998799889989999099919992999399949995999699979998999910000100011000210003100041000510006100071000810009100101001110012100131001410015100161001710018100191002010021100221002310024100251002610027100281002910030100311003210033100341003510036100371003810039100401004110042100431004410045100461004710048100491005010051100521005310054100551005610057100581005910060100611006210063100641006510066100671006810069100701007110072100731007410075100761007710078100791008010081100821008310084100851008610087100881008910090100911009210093100941009510096100971009810099101001010110102101031010410105101061010710108101091011010111101121011310114101151011610117101181011910120101211012210123101241012510126101271012810129101301013110132101331013410135101361013710138101391014010141101421014310144101451014610147101481014910150101511015210153101541015510156101571015810159101601016110162101631016410165101661016710168101691017010171101721017310174101751017610177101781017910180101811018210183101841018510186101871018810189101901019110192101931019410195101961019710198101991020010201102021020310204102051020610207102081020910210102111021210213102141021510216102171021810219102201022
11022210223102241022510226102271022810229102301023110232102331023410235102361023710238102391024010241102421024310244102451024610247102481024910250102511025210253102541025510256102571025810259102601026110262102631026410265102661026710268102691027010271102721027310274102751027610277102781027910280102811028210283102841028510286102871028810289102901029110292102931029410295102961029710298102991030010301103021030310304103051030610307103081030910310103111031210313103141031510316103171031810319103201032110322103231032410325103261032710328103291033010331103321033310334103351033610337103381033910340103411034210343103441034510346103471034810349103501035110352103531035410355103561035710358103591036010361103621036310364103651036610367103681036910370103711037210373103741037510376103771037810379103801038110382103831038410385103861038710388103891039010391103921039310394103951039610397103981039910400104011040210403104041040510406104071040810409104101041110412104131041410415104161041710418104191042010421104221042310424104251042610427104281042910430104311043210433104341043510436104371043810439104401044110442104431044410445104461044710448104491045010451104521045310454104551045610457104581045910460104611046210463104641046510466104671046810469104701047110472104731047410475104761047710478104791048010481104821048310484104851048610487104881048910490104911049210493104941049510496104971049810499105001050110502105031050410505105061050710508105091051010511105121051310514105151051610517105181051910520105211052210523105241052510526105271052810529105301053110532105331053410535105361053710538105391054010541105421054310544105451054610547105481054910550105511055210553105541055510556105571055810559105601056110562105631056410565105661056710568105691057010571105721057310574105751057610577105781057910580105811058210583105841058510586105871058810589105901059110592105931059410595105961059710598105991060010601106021060310604106051060610607106081060910610106111061210613106141061510616106171061810619106201062
11062210623106241062510626106271062810629106301063110632106331063410635106361063710638106391064010641106421064310644106451064610647106481064910650106511065210653106541065510656106571065810659106601066110662106631066410665106661066710668106691067010671106721067310674106751067610677106781067910680106811068210683106841068510686106871068810689106901069110692106931069410695106961069710698106991070010701107021070310704107051070610707107081070910710107111071210713107141071510716107171071810719107201072110722107231072410725107261072710728107291073010731107321073310734107351073610737107381073910740107411074210743107441074510746107471074810749107501075110752107531075410755107561075710758107591076010761107621076310764107651076610767107681076910770107711077210773107741077510776107771077810779107801078110782107831078410785107861078710788107891079010791107921079310794107951079610797107981079910800108011080210803108041080510806108071080810809108101081110812108131081410815108161081710818108191082010821108221082310824108251082610827108281082910830108311083210833108341083510836108371083810839108401084110842108431084410845108461084710848108491085010851108521085310854108551085610857108581085910860108611086210863108641086510866108671086810869108701087110872108731087410875108761087710878108791088010881108821088310884108851088610887108881088910890108911089210893108941089510896108971089810899109001090110902109031090410905109061090710908109091091010911109121091310914109151091610917109181091910920109211092210923109241092510926109271092810929109301093110932109331093410935109361093710938109391094010941109421094310944109451094610947109481094910950109511095210953109541095510956109571095810959109601096110962109631096410965109661096710968109691097010971109721097310974109751097610977109781097910980109811098210983109841098510986109871098810989109901099110992109931099410995109961099710998109991100011001110021100311004110051100611007110081100911010110111101211013110141101511016110171101811019110201102
11102211023110241102511026110271102811029110301103111032110331103411035110361103711038110391104011041110421104311044110451104611047110481104911050110511105211053110541105511056110571105811059110601106111062110631106411065110661106711068110691107011071110721107311074110751107611077110781107911080110811108211083110841108511086110871108811089110901109111092110931109411095110961109711098110991110011101111021110311104111051110611107111081110911110111111111211113111141111511116111171111811119111201112111122111231112411125111261112711128111291113011131111321113311134111351113611137111381113911140111411114211143111441114511146111471114811149 |
- # @generated from torch/_C/__init__.pyi.in
- # mypy: disable-error-code="type-arg"
- # mypy: allow-untyped-defs
- import builtins
- from enum import Enum, IntEnum
- from pathlib import Path
- from typing import (
- Any,
- AnyStr,
- BinaryIO,
- Callable,
- ContextManager,
- Dict,
- Generic,
- Iterable,
- Iterator,
- List,
- Literal,
- NamedTuple,
- Optional,
- Protocol,
- Sequence,
- Set,
- SupportsIndex,
- Tuple,
- Type,
- TypeVar,
- Union,
- overload,
- runtime_checkable,
- )
- from typing_extensions import ParamSpec, Self
- import numpy
- import torch
- from torch import inf, SymInt, Tensor
- from torch.autograd.graph import Node as _Node
- from torch.package import PackageExporter
- from torch.storage import UntypedStorage, TypedStorage
- from torch.types import (
- _bool,
- _complex,
- _device,
- _dispatchkey,
- _dtype,
- _float,
- _int,
- _layout,
- _qscheme,
- _size,
- Device,
- Number,
- Storage,
- )
- from torch._prims_common import DeviceLikeType
- from torch.utils._python_dispatch import TorchDispatchMode
- # This module is defined in torch/csrc/Module.cpp
- from . import _functorch, _lazy, _lazy_ts_backend, _nn, _onnx, _VariableFunctions, _cpu, _aoti, _verbose
- K = TypeVar("K")
- T = TypeVar("T")
- S = TypeVar("S", bound="torch.Tensor")
- P = ParamSpec("P")
- ReturnVal = TypeVar("ReturnVal", covariant=True) # return value (always covariant)
- _T_co = TypeVar("_T_co", covariant=True)
- @runtime_checkable
- class _NestedSequence(Protocol[_T_co]):
- """A protocol for representing nested sequences.
- References::
- `numpy._typing._NestedSequence`
- <https://github.com/numpy/numpy/blob/main/numpy/_typing/_nested_sequence.py>
- """
- def __len__(self, /) -> builtins.int: ...
- def __getitem__(self, index: builtins.int, /) -> _T_co | _NestedSequence[_T_co]: ...
- def __contains__(self, x: builtins.object, /) -> builtins.bool: ...
- def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ...
- def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ...
- def count(self, value: Any, /) -> builtins.int: ...
- def index(self, value: Any, /) -> builtins.int: ...
# Defined in torch/csrc/Device.cpp
class device:
    """Stub for ``torch.device`` (implemented in C++)."""

    type: str  # THPDevice_type
    index: _int  # THPDevice_index

    def __get__(self, instance, owner=None) -> device: ...

    # THPDevice_pynew
    @overload
    def __init__(self, device: DeviceLikeType) -> None: ...
    @overload
    def __init__(self, type: str, index: _int) -> None: ...

    # Uncomment if we ever make torch.device a decorator
    # def __call__(self, func: T) -> T: ...

    def __enter__(self) -> device: ...
    def __exit__(self, exc_type, exc_val, exc_tb) -> None: ...
    def __reduce__(self) -> Tuple[Any, ...]: ...  # THPDevice_reduce
# Defined in torch/csrc/Stream.cpp
class Stream:
    """Stub for the device-agnostic stream object (implemented in C++)."""

    stream_id: _int  # Stream id
    device_index: _int
    device_type: _int
    device: _device  # The device of the stream

    # NOTE: first parameter renamed self -> cls; __new__ receives the class.
    @overload
    def __new__(cls, device: Optional[DeviceLikeType] = None, *, priority: _int = 0) -> Stream: ...
    @overload
    def __new__(cls, stream_id: _int, device_index: _int, device_type: _int, *, priority: _int = 0) -> Stream: ...

    def query(self) -> _bool: ...
    def synchronize(self) -> None: ...
    def wait_event(self, event: Event) -> None: ...
    def wait_stream(self, other: Stream) -> None: ...
    def record_event(self, event: Optional[Event] = None) -> Event: ...

    def __hash__(self) -> _int: ...
    def __repr__(self) -> str: ...
    def __eq__(self, other: object) -> _bool: ...
# Defined in torch/csrc/Event.cpp
class Event:
    """Stub for the device-agnostic event object (implemented in C++)."""

    device: _device  # The device of the Event
    event_id: _int  # The raw event created by device backend

    # NOTE: first parameter renamed self -> cls; __new__ receives the class,
    # not an instance.
    def __new__(cls,
                device: Optional[DeviceLikeType] = None,
                *,
                enable_timing: _bool = False,
                blocking: _bool = False,
                interprocess: _bool = False) -> Event: ...

    # NOTE: first parameter renamed self -> cls to match @classmethod.
    @classmethod
    def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> Event: ...

    def record(self, stream: Optional[Stream] = None) -> None: ...
    def wait(self, stream: Optional[Stream] = None) -> None: ...
    def query(self) -> _bool: ...
    def elapsed_time(self, other: Event) -> _float: ...
    def synchronize(self) -> None: ...
    def ipc_handle(self) -> bytes: ...
    def __repr__(self) -> str: ...
# Defined in torch/csrc/Size.cpp
class Size(Tuple[_int, ...]):
    """Stub for ``torch.Size``: an immutable tuple of dimension sizes."""

    # TODO: __reduce__
    @overload  # type: ignore[override]
    def __getitem__(self: Size, key: _int) -> _int: ...
    @overload
    def __getitem__(self: Size, key: slice) -> Size: ...
    def numel(self: Size) -> _int: ...
# Defined in torch/csrc/Dtype.cpp
class dtype:
    """Stub for ``torch.dtype`` (implemented in C++)."""

    # TODO: __reduce__
    is_floating_point: _bool
    is_complex: _bool
    is_signed: _bool
    itemsize: _int

    def to_real(self) -> dtype: ...
    def to_complex(self) -> dtype: ...
# Defined in torch/csrc/TypeInfo.cpp
class iinfo:
    """Integer dtype metadata (bit width and representable range)."""

    bits: _int
    min: _int
    max: _int
    dtype: str

    def __init__(self, dtype: _dtype) -> None: ...

class finfo:
    """Floating-point dtype metadata (range, eps, resolution, ...)."""

    bits: _int
    min: _float
    max: _float
    eps: _float
    tiny: _float
    smallest_normal: _float
    resolution: _float
    dtype: str

    @overload
    def __init__(self, dtype: _dtype) -> None: ...
    @overload
    def __init__(self) -> None: ...
# Placeholder (`...`) declarations for the dtype singletons; the concrete
# values are attached by the C extension at runtime.
float32: dtype = ...
float: dtype = ...
float64: dtype = ...
double: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
float8_e4m3fn: dtype = ...
float8_e4m3fnuz: dtype = ...
float8_e5m2: dtype = ...
float8_e5m2fnuz: dtype = ...
half: dtype = ...
uint8: dtype = ...
uint16: dtype = ...
uint32: dtype = ...
uint64: dtype = ...
int8: dtype = ...
int16: dtype = ...
short: dtype = ...
int32: dtype = ...
int: dtype = ...
int64: dtype = ...
long: dtype = ...
complex32: dtype = ...
complex64: dtype = ...
chalf: dtype = ...
cfloat: dtype = ...
complex128: dtype = ...
cdouble: dtype = ...
quint8: dtype = ...
qint8: dtype = ...
qint32: dtype = ...
bool: dtype = ...
quint4x2: dtype = ...
quint2x4: dtype = ...
bits1x8: dtype = ...
bits2x4: dtype = ...
bits4x2: dtype = ...
bits8: dtype = ...
bits16: dtype = ...
# Defined in torch/csrc/Layout.cpp
class layout:
    """Stub for ``torch.layout`` (implemented in C++)."""
    ...

# Defined in torch/csrc/utils/disable_torch_function.cpp
def DisableTorchFunction(): ...
def DisableTorchFunctionSubclass(): ...
# Defined in torch/csrc/utils/tensor_layouts.cpp
# Placeholder (`...`) declarations; concrete singletons come from the C extension.
strided: layout = ...
sparse_coo: layout = ...
sparse_csr: layout = ...
sparse_csc: layout = ...
sparse_bsr: layout = ...
sparse_bsc: layout = ...
_mkldnn: layout = ...
jagged: layout = ...
# Defined in torch/csrc/MemoryFormat.cpp
class memory_format:
    """Stub for ``torch.memory_format`` (implemented in C++)."""
    ...

# Defined in torch/csrc/utils/tensor_memoryformats.cpp
contiguous_format: memory_format = ...
channels_last: memory_format = ...
channels_last_3d: memory_format = ...
preserve_format: memory_format = ...
# Defined in torch/csrc/QScheme.cpp
class qscheme:
    """Stub for ``torch.qscheme`` (implemented in C++)."""
    ...

# Defined in torch/csrc/utils/tensor_qschemes.h
per_tensor_affine: qscheme = ...
per_channel_affine: qscheme = ...
per_tensor_symmetric: qscheme = ...
per_channel_symmetric: qscheme = ...
per_channel_affine_float_qparams: qscheme = ...
# Defined in torch/csrc/autograd/python_function.cpp
class _FunctionBase:
    """C-level base class for autograd ``Function`` objects."""

    saved_tensors: Tuple[Tensor]
    _raw_saved_tensors: Tuple[Any]
    next_functions: Tuple[Tuple[Any, _int], ...]
    needs_input_grad: Tuple[_bool]
    metadata: dict
    _materialize_non_diff_grads: _bool
    # skip adding type hints for the fields that have wrappers defined
    # in torch/autograd/function.py
# Defined in torch/csrc/autograd/python_legacy_variable.cpp
class _LegacyVariableBase(Tensor):  # inherits from Tensor to appease mypy
    """C-level base for the legacy ``Variable`` constructor."""

    def __init__(
        self,
        data: Optional[Tensor] = ...,
        requires_grad: Optional[_bool] = ...,
        volatile: Optional[_bool] = ...,
        _grad_fn: Optional[_FunctionBase] = ...,
    ) -> None: ...
# Defined in torch/csrc/jit/python/init.cpp
class IODescriptor: ...  # opaque descriptor used by _jit_flatten/_jit_unflatten
class JITException: ...
class Future(Generic[T]):
    """Stub for the JIT ``Future``: an asynchronous result of type ``T``."""

    def __init__(self, devices: List[device]) -> None: ...
    def done(self) -> _bool: ...
    def value(self) -> T: ...
    def wait(self) -> T: ...
    def add_done_callback(self, callback: Callable) -> None: ...
    def then(self, callback: Callable) -> Future[T]: ...
    def set_result(self, result: T) -> None: ...
    def _set_unwrap_func(self, callback: Callable) -> None: ...
class _Await:
    """Stub for JIT ``Await`` objects (see ``_awaitable``/``_awaitable_wait``)."""

    def __init__(self) -> None: ...
    def fn(self) -> Callable: ...
    def args(self) -> Tuple[Any, ...]: ...
    def is_nowait(self) -> _bool: ...

def _jit_set_num_profiled_runs(num: _size) -> _size: ...
# Defined in torch/csrc/jit/passes/mobile_optimizer_type.h
class _MobileOptimizerType: ...

# Only the names are declared here; the singleton values are attached by the
# C extension at runtime.
CONV_BN_FUSION: _MobileOptimizerType
INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType
REMOVE_DROPOUT: _MobileOptimizerType
FUSE_ADD_RELU: _MobileOptimizerType
HOIST_CONV_PACKED_PARAMS: _MobileOptimizerType
VULKAN_AUTOMATIC_GPU_TRANSFER: _MobileOptimizerType

def fork(*args: Any, **kwargs: Any) -> Future: ...
def wait(fut: Future) -> Any: ...
def _awaitable(*args: Any, **kwargs: Any) -> _Await: ...
def _awaitable_wait(aw: _Await) -> Any: ...
def _awaitable_nowait(x: Any) -> _Await: ...
def _collect_all(futures: List[Future]) -> Future: ...
def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ...
def unify_type_list(types: List[JitType]) -> JitType: ...
def _freeze_module(
    module: ScriptModule,
    # NOTE(review): mutable default kept byte-for-byte — it is part of the
    # generated stub's declared interface.
    preserved_attrs: List[str] = [],
    freeze_interfaces: _bool = True,
    preserveParameters: _bool = True,
) -> ScriptModule: ...
# Frozen-graph optimization passes and basic JIT entry points.
def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ...
def _jit_pass_optimize_for_inference(
    module: torch.jit.ScriptModule,
    other_methods: List[str] = [],
) -> None: ...
def _jit_pass_fold_frozen_conv_bn(graph: Graph): ...
def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ...
def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ...
def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ...
def _jit_pass_concat_frozen_linear(graph: Graph): ...
def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ...
def _jit_pass_transpose_frozen_linear(graph: Graph): ...
def _jit_pass_remove_dropout(module: torch.jit.ScriptModule): ...
def _is_tracing() -> _bool: ...
def _jit_init() -> _bool: ...
def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ...
def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ...
def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ...
def _get_operation_overload(
    op_name: str,
    op_overload_name: str,
) -> Tuple[Callable, Callable, List[Any]]: ...
def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ...
# Mobile/Vulkan/Metal optimization entry points.
def _jit_pass_optimize_for_mobile(
    module: torch.jit.ScriptModule,
    optimization_blocklist: Set[_MobileOptimizerType],
    preserved_methods: List[AnyStr],
) -> torch.jit.ScriptModule: ...
def _clone_module_with_class(
    module: torch.jit.ScriptModule,
    ignored_methods: List[AnyStr],
    ignored_attributes: List[AnyStr],
) -> torch.jit.ScriptModule: ...
def _jit_pass_vulkan_optimize_for_mobile(
    module: torch.jit.ScriptModule,
    optimization_blocklist: Set[_MobileOptimizerType],
    preserved_methods: List[AnyStr],
) -> torch.jit.ScriptModule: ...
def _jit_pass_metal_optimize_for_mobile(
    module: torch.jit.ScriptModule,
    preserved_methods: List[AnyStr],
) -> torch.jit.ScriptModule: ...
# Generic graph passes and schema queries.
def _jit_pass_inline(Graph) -> None: ...
def _jit_pass_constant_propagation(Graph) -> None: ...
def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ...
def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ...
def _jit_erase_non_input_shape_information(Graph) -> None: ...
def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ...
def _jit_get_all_schemas() -> List[FunctionSchema]: ...
def _jit_check_alias_annotation(
    g: Graph,
    args: Tuple[Any, ...],
    unqualified_op_name: str,
): ...
# Fuser capability queries and toggles.
def _jit_can_fuse_on_cpu() -> _bool: ...
def _jit_can_fuse_on_gpu() -> _bool: ...
def _jit_can_fuse_on_cpu_legacy() -> _bool: ...
def _debug_get_fusion_group_inlining() -> _bool: ...
def _debug_set_fusion_group_inlining(enable: _bool): ...
def _jit_texpr_fuser_enabled() -> _bool: ...
def _jit_nvfuser_enabled() -> _bool: ...
def _jit_llga_enabled() -> _bool: ...
def _jit_set_llga_enabled(enable: _bool): ...
def _llvm_enabled() -> _bool: ...
def _jit_override_can_fuse_on_cpu(override: _bool): ...
def _jit_override_can_fuse_on_gpu(override: _bool): ...
def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ...
def _jit_set_symbolic_shapes_test_mode(override: _bool): ...
def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ...
def _jit_set_texpr_fuser_enabled(enable: _bool): ...
def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ...
def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ...
def _jit_cat_wo_conditionals(optimize_cat: _bool): ...
def _jit_opt_conditionals(opt_conds: _bool): ...
def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ...
def _jit_pass_erase_shape_information(graph: Graph): ...
# Quantization-related module passes.
def _jit_pass_fold_convbn(module: torch.jit.ScriptModule): ...
def _jit_pass_insert_observers(
    module: torch.jit.ScriptModule,
    method_name: str,
    qconfig_dict: Dict[str, Any],
    inplace: _bool,
    quant_type: _int,
): ...
def _jit_pass_insert_quant_dequant(
    module: torch.jit.ScriptModule,
    method_name: str,
    inplace: _bool,
    debug: _bool,
    quant_type: _int,
): ...
def _jit_pass_insert_quant_dequant_for_ondevice_ptq(
    module: torch.jit.ScriptModule,
    method_name: str,
    inplace: _bool,
    debug: _bool,
    quant_type: _int,
): ...
def _jit_pass_quant_finalize(
    module: torch.jit.ScriptModule,
    quant_type: _int,
    preserved_attrs: Sequence[str],
): ...
def _jit_pass_quant_finalize_for_ondevice_ptq(
    module: torch.jit.ScriptModule,
    quant_type: _int,
    method_name: str,
): ...
def _jit_pass_insert_observer_method_for_ondevice_ptq(
    module: torch.jit.ScriptModule,
    method_name: str,
    qconfig_dict: Dict[str, Any],
    inplace: _bool,
    quant_type: _int,
): ...
def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ...
def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ...
def _jit_set_fusion_strategy(
    strategy: List[Tuple[str, _int]],
) -> List[Tuple[str, _int]]: ...
def _jit_try_infer_type(obj: Any) -> InferredType: ...
def _jit_get_trigger_value(trigger_name: str) -> _int: ...

# Defined in torch/csrc/jit/python/script_init.cpp
# Resolver callback: maps a name to the Python callable it refers to.
ResolutionCallback = Callable[[str], Callable[..., Any]]

# Defined in torch/csrc/jit/python/script_init.cpp
# and torch/csrc/jit/python/init.cpp
def _maybe_call_torch_function_for_op_packet(
    op_overload_packet: Any,
    args: Any,
    kwargs: Any,
) -> Any: ...
def _check_schema_allow_fake_script_object(
    schema: FunctionSchema,
    args: Any,
    kwargs: Any,
) -> _bool: ...
def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ...
def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ...
def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ...
def _jit_assert_is_instance(obj: Any, type: JitType): ...
def _jit_clear_class_registry() -> None: ...
def _jit_set_emit_hooks(
    ModuleHook: Optional[Callable],
    FunctionHook: Optional[Callable],
) -> None: ...
def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ...
# Lite-interpreter (mobile) model loading, bytecode versioning and backports.
def _load_for_lite_interpreter(
    filename: Union[str, Path],
    map_location: Optional[DeviceLikeType],
): ...
def _load_for_lite_interpreter_from_buffer(
    buffer: BinaryIO,
    map_location: Optional[DeviceLikeType],
): ...
def _export_operator_list(module: LiteScriptModule): ...
def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ...
def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ...
def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ...
def _backport_for_mobile(
    filename_input: Union[str, Path],
    filename_output: Union[str, Path],
    to_version: _int,
) -> None: ...
def _backport_for_mobile_from_buffer(
    buffer: BinaryIO,
    filename_output: Union[str, Path],
    to_version: _int,
) -> None: ...
def _backport_for_mobile_to_buffer(
    filename_input: Union[str, Path],
    to_version: _int,
) -> bytes: ...
def _backport_for_mobile_from_buffer_to_buffer(
    buffer: BinaryIO,
    to_version: _int,
) -> bytes: ...
def _get_model_ops_and_info(filename: Union[str, Path]): ...
def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ...
def _get_mobile_model_contained_types(filename: Union[str, Path]): ...
def _get_mobile_model_contained_types_from_buffer(buffer: BinaryIO): ...
def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ...
def _get_graph_executor_optimize(optimize: Optional[_bool] = None) -> _bool: ...
def _set_graph_executor_optimize(optimize: _bool): ...
def _export_opnames(module: ScriptModule) -> List[str]: ...
# Tracing entry points, upgrader introspection, and IR/schema parsing.
def _create_function_from_trace(
    qualname: str,
    func: Callable[..., Any],
    input_tuple: Tuple[Any, ...],
    var_lookup_fn: Callable[[Tensor], str],
    strict: _bool,
    force_outplace: _bool,
    argument_names: List[str],
) -> Tuple[Graph, Stack]: ...
def _create_function_from_trace_with_dict(
    qualname: str,
    func: Callable[..., Any],
    input_dict: Dict[str, Any],
    var_lookup_fn: Callable[[Tensor], str],
    strict: _bool,
    force_outplace: _bool,
    argument_names: List[str],
) -> Tuple[Graph, Stack]: ...
def _jit_is_script_object(obj: Any) -> _bool: ...
def _last_executed_optimized_graph() -> Graph: ...
def parse_type_comment(comment: str) -> Decl: ...
def _get_upgraders_map_size() -> _int: ...
def _get_upgraders_entry_map() -> Dict[str, str]: ...
def _dump_upgraders_map() -> Dict[str, str]: ...
def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ...
def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ...
def merge_type_from_type_comment(
    decl: Decl,
    type_annotation_decl: Decl,
    is_method: _bool,
) -> Decl: ...
def parse_ir(input: str, parse_tensor_constants: _bool = False) -> Graph: ...
def parse_schema(schema: str) -> FunctionSchema: ...
def get_device(input: Tensor) -> _int: ...
def _resolve_type_from_object(
    obj: Any,
    range: SourceRange,
    rcb: ResolutionCallback,
) -> JitType: ...
def _create_module_with_type(ty: JitType) -> ScriptModule: ...
def _create_object_with_type(ty: ClassType) -> ScriptObject: ...
def _run_emit_module_hook(m: ScriptModule): ...
def _replace_overloaded_method_decl(
    overload_decl: Decl,
    implementation_def: Def,
    new_name: str,
) -> Def: ...
# ONNX-export related graph passes.
def _jit_pass_lower_all_tuples(graph: Graph) -> None: ...
def _jit_pass_onnx_set_dynamic_input_shape(
    graph: Graph,
    dynamic_axes: Dict[str, Dict[_int, str]],
    input_names: List[str],
) -> None: ...
def _jit_pass_onnx_graph_shape_type_inference(
    graph: Graph,
    params_dict: Dict[str, IValue],
    opset_version: _int,
) -> None: ...
def _jit_pass_onnx_assign_output_shape(
    graph: Graph,
    tensors: List[Tensor],
    desc: IODescriptor,
    onnx_shape_inference: _bool,
    is_script: _bool,
    opset_version: _int,
) -> None: ...
def _jit_pass_onnx_remove_inplace_ops_for_onnx(
    graph: Graph,
    module: Optional[ScriptModule] = None,
) -> None: ...
def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ...
def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ...
def _jit_pass_peephole(
    graph: Graph,
    disable_shape_peepholes: _bool = False,
) -> None: ...
- def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ...
- def _jit_pass_fuse_addmm(graph: Graph) -> None: ...
- def _jit_pass_onnx_preprocess(graph: Graph) -> None: ...
- def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ...
- def _jit_pass_onnx_remove_print(graph: Graph) -> None: ...
- def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ...
- def _jit_pass_onnx_unpack_quantized_weights(
- graph: Graph,
- paramsDict: Dict[str, IValue],
- caffe2: _bool,
- ) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_quantization_insert_permutes(
- graph: Graph,
- paramsDict: Dict[str, IValue],
- ) -> Dict[str, IValue]: ...
- def _jit_pass_custom_pattern_based_rewrite_graph(
- pattern: str,
- fused_node_name: str,
- graph: Graph,
- ) -> None: ...
- def _jit_onnx_list_model_parameters(
- module: ScriptModule,
- ) -> Tuple[ScriptModule, List[IValue]]: ...
- def _jit_pass_erase_number_types(graph: Graph) -> None: ...
- def _jit_pass_onnx_lint(graph: Graph) -> None: ...
- def _jit_pass_onnx(
- graph: Graph,
- _jit_pass_onnx: _onnx.OperatorExportTypes,
- ) -> Graph: ...
- def _jit_pass_onnx_scalar_type_analysis(
- graph: Graph,
- lowprecision_cast: _bool,
- opset_version: _int,
- ) -> None: ...
- def _jit_pass_onnx_peephole(
- graph: Graph,
- opset_version: _int,
- fixed_batch_size: _bool,
- ) -> None: ...
- def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ...
- def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ...
- def _jit_pass_onnx_function_extraction(
- graph: Graph,
- module_names: Set[str],
- param_names: List[str],
- ) -> Dict[Node, Dict[str, str]]: ...
- def _jit_pass_onnx_clear_scope_records() -> None: ...
- def _jit_pass_onnx_track_scope_attributes(
- graph: Graph,
- onnx_attrs: Dict[str, Any],
- ) -> None: ...
- def _jit_is_onnx_log_enabled() -> _bool: ...
- def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ...
- def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ...
- def _jit_onnx_log(*args: Any) -> None: ...
- def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ...
- def _jit_pass_inline_fork_wait(graph: Graph) -> None: ...
- def _jit_pass_onnx_deduplicate_initializers(
- graph: Graph,
- params_dict: Dict[str, IValue],
- is_train: _bool,
- ) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_eval_peephole(
- graph: Graph,
- paramsDict: Dict[str, IValue],
- ) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_constant_fold(
- graph: Graph,
- paramsDict: Dict[str, IValue],
- opset_version: _int,
- ) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_eliminate_unused_items(
- graph: Graph,
- paramsDict: Dict[str, IValue],
- ) -> Dict[str, IValue]: ...
- def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ...
- def _jit_pass_filter_non_tensor_arguments(
- params: Dict[str, IValue],
- ) -> Dict[str, Tensor]: ...
- def _jit_decay_packed_param_input_types(graph: Graph) -> None: ...
- def _jit_pass_onnx_node_shape_type_inference(
- n: Node,
- paramsDict: Dict[str, IValue],
- opset_version: _int,
- ) -> None: ...
- def _jit_onnx_convert_pattern_from_subblock(
- block: Block,
- n: Node,
- env: Dict[Value, Value],
- values_in_env: Set[Value],
- ) -> List[Value]: ...
- def _jit_pass_onnx_block(
- old_block: Block,
- new_block: Block,
- operator_export_type: _onnx.OperatorExportTypes,
- env: Dict[Value, Value],
- values_in_env: Set[Value],
- is_sub_block: _bool,
- ) -> Dict[Value, Value]: ...
- def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ...
- def _jit_pass_fixup_onnx_controlflow_node(
- n: Node,
- opset_version: _int,
- ) -> List[Value]: ...
- def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ...
- def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ...
- def _generate_upgraders_graph() -> Dict[str, Graph]: ...
- def _calculate_package_version_based_on_upgraders(val: _bool): ...
- def _get_version_calculator_flag() -> _bool: ...
- def _jit_script_interface_compile(
- name: str,
- class_def: ClassDef,
- rcb: ResolutionCallback,
- is_module: _bool,
- ): ...
- def _jit_script_compile_overload(
- qualname: str,
- overload_decl: Decl,
- implementation_def: Def,
- rcb: ResolutionCallback,
- implementation_defaults: Dict[str, Any],
- signature: Any,
- ): ...
- def _jit_script_compile(
- qual_name: str,
- definition: Def,
- rcb: ResolutionCallback,
- defaults: Dict[str, Any],
- ): ...
- def _jit_script_class_compile(
- qual_name: str,
- definition: ClassDef,
- defaults: Dict[str, Dict[str, Any]],
- rcb: ResolutionCallback,
- ): ...
- def _parse_source_def(src: str) -> Def: ...
- def import_ir_module(
- cu: CompilationUnit,
- filename: Union[str, Path],
- map_location: Optional[DeviceLikeType],
- extra_files: Dict[str, Any],
- ) -> ScriptModule: ...
- def import_ir_module_from_buffer(
- cu: CompilationUnit,
- buffer: BinaryIO,
- map_location: Optional[DeviceLikeType],
- extra_files: Dict[str, Any],
- ) -> ScriptModule: ...
- def _import_ir_module_from_package(
- cu: CompilationUnit,
- reader: PyTorchFileReader,
- storage_context: DeserializationStorageContext,
- map_location: Optional[DeviceLikeType],
- ts_id: str,
- ) -> ScriptModule: ...
- def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ...
- def _check_onnx_proto(proto: str) -> None: ...
- def _propagate_and_assign_input_shapes(
- graph: Graph,
- inputs: Tuple[Tensor, ...],
- param_count_list: List[_int],
- with_grad: _bool,
- propagate: _bool,
- ) -> Graph: ...
- # Defined in torch/csrc/jit/runtime/graph_executor.h
- class GraphExecutorState: ...
- # Defined in torch/csrc/jit/ir/alias_analysis.h
class AliasDb:
    # Stub for the C++ alias-analysis database attached to a Graph.
    # Only its human-readable rendering is exposed to Python here.
    def __str__(self) -> str: ...
- class _InsertPoint:
- def __enter__(self) -> None: ...
- def __exit__(self, *args) -> None: ...
- # Defined in torch/csrc/jit/ir/ir.h
class Use:
    """A single use of a Value: the consuming node and the input slot it occupies."""

    @property
    def user(self) -> Node: ...
    @property
    def offset(self) -> _int: ...
    def isAfter(self, other: Use) -> _bool: ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Value:
- def type(self) -> JitType: ...
- def setType(self, t: JitType) -> Value: ...
- def setTypeAs(self, other: Value) -> Value: ...
- def inferTypeFrom(self, t: Tensor) -> None: ...
- def debugName(self) -> str: ...
- def setDebugName(self, name: str) -> None: ...
- def unique(self) -> _int: ...
- def offset(self) -> _int: ...
- def node(self) -> Node: ...
- def uses(self) -> List[Use]: ...
- def replaceAllUsesWith(self, val: Value) -> None: ...
- def replaceAllUsesAfterNodeWith(self, node: Node, val: Value) -> None: ...
- def requires_grad(self) -> _bool: ...
- def requiresGrad(self) -> _bool: ...
- def copyMetadata(self, other: Value) -> Value: ...
- def isCompleteTensor(self) -> _bool: ...
- def toIValue(self) -> IValue: ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Block:
- def inputs(self) -> Iterator[Value]: ...
- def outputs(self) -> Iterator[Value]: ...
- def nodes(self) -> Iterator[Node]: ...
- def paramNode(self) -> Node: ...
- def returnNode(self) -> Node: ...
- def owningNode(self) -> Node: ...
- def registerOutput(self, n: Value) -> _int: ...
- def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Node:
- def __getitem__(self, key: str) -> Any: ...
- def schema(self) -> str: ...
- def input(self) -> Value: ...
- def inputs(self) -> Iterator[Value]: ...
- def inputsAt(self, idx: _int) -> Value: ...
- def inputsSize(self) -> _int: ...
- def output(self) -> Value: ...
- def outputs(self) -> Iterator[Value]: ...
- def outputsAt(self, idx: _int) -> Value: ...
- def outputsSize(self) -> _int: ...
- def hasMultipleOutputs(self) -> _bool: ...
- def blocks(self) -> List[Block]: ...
- def addBlock(self) -> Block: ...
- def mustBeNone(self) -> _bool: ...
- def matches(self, pattern: str) -> _bool: ...
- def kind(self) -> str: ...
- def kindOf(self, name: str) -> str: ...
- def addInput(self, name: str) -> Value: ...
- def replaceInput(self, i: _int, newValue: Value) -> Value: ...
- def replaceInputWith(self, from_: Value, to: Value) -> None: ...
- def replaceAllUsesWith(self, n: Node) -> None: ...
- def insertBefore(self, n: Node) -> Node: ...
- def insertAfter(self, n: Node) -> Node: ...
- def isBefore(self, n: Node) -> _bool: ...
- def isAfter(self, n: Node) -> _bool: ...
- def moveBefore(self, n: Node) -> None: ...
- def moveAfter(self, n: Node) -> None: ...
- def removeInput(self, i: _int) -> None: ...
- def removeAllInputs(self, i: _int) -> None: ...
- def hasUses(self) -> _bool: ...
- def eraseOutput(self, i: _int) -> None: ...
- def addOutput(self) -> Value: ...
- def scopeName(self) -> str: ...
- def isNondeterministic(self) -> _bool: ...
- def copyAttributes(self, rhs: Node) -> Node: ...
- def copyMetadata(self, rhs: Node) -> Node: ...
- def hasAttributes(self) -> _bool: ...
- def hasAttribute(self, name: str) -> _bool: ...
- def removeAttribute(self, attr: str) -> Node: ...
- def namedInput(self, name: str) -> Value: ...
- def sourceRange(self) -> SourceRange: ...
- def owningBlock(self) -> Block: ...
- def findNode(self, kind: str, recurse: _bool = True) -> Node: ...
- def findAllNodes(self, kind: str, recurse: _bool = True) -> List[Node]: ...
- def getModuleHierarchy(self) -> str: ...
- def prev(self) -> Node: ...
- def destroy(self) -> None: ...
- def attributeNames(self) -> List[str]: ...
- # Accessors for attributes as types.
- def f(self, name: str) -> _float: ...
- def f_(self, name: str, val: _float) -> Node: ...
- def fs(self, name: str) -> List[_float]: ...
- def fs_(self, name: str, val: List[_float]) -> Node: ...
- def c(self, name: str) -> complex: ...
- def c_(self, name: str, val: complex) -> Node: ...
- def s(self, name: str) -> str: ...
- def s_(self, name: str, val: str) -> Node: ...
- def ss(self, name: str) -> List[str]: ...
- def ss_(self, name: str, val: List[str]) -> Node: ...
- def i(self, name: str) -> _int: ...
- def i_(self, name: str, val: _int) -> Node: ...
- # Cannot define "is" like this because it's a reserved keyword in python.
- # def is(self, name: str) -> List[_int]: ...
- # def is_(self, name: str, val: List[_int]) -> Node: ...
- def g(self, name: str) -> Graph: ...
- def g_(self, name: str, val: Graph) -> Node: ...
- def gs(self, name: str) -> List[Graph]: ...
- def gs_(self, name: str, val: List[Graph]) -> Node: ...
- def ival(self, name: str) -> IValue: ...
- def ival_(self, name: str, val: IValue) -> Node: ...
- def t(self, name: str) -> Tensor: ...
- def t_(self, name: str, val: Tensor) -> Node: ...
- def ts(self, name: str) -> List[Tensor]: ...
- def ts_(self, name: str, val: List[Tensor]) -> Node: ...
- def ty(self, name: str) -> JitType: ...
- def ty_(self, name: str, val: JitType) -> Node: ...
- def tys(self, name: str) -> List[JitType]: ...
- def tys_(self, name: str, val: List[JitType]) -> Node: ...
- # Defined in torch/csrc/jit/ir/ir.h
- class Graph:
- def inputs(self) -> Iterator[Value]: ...
- def outputs(self) -> Iterator[Value]: ...
- def nodes(self) -> Iterator[Node]: ...
- def param_node(self) -> Node: ...
- def return_node(self) -> Node: ...
- def addInput(self, name: str = "") -> Value: ...
- def eraseInput(self, i: _int) -> None: ...
- def registerOutput(self, n: Value) -> _int: ...
- def eraseOutput(self, i: _int) -> None: ...
- def create(self, name: str, args, num_outputs: _int) -> Node: ...
- def appendNode(self, n: Node) -> Node: ...
- def prependNode(self, n: Node) -> Node: ...
- def insertNode(self, n: Node) -> Node: ...
- def block(self) -> Block: ...
- def lint(self) -> None: ...
- def alias_db(self) -> AliasDb: ...
- def setInsertPoint(self, n: Union[Block, Node]) -> None: ...
- def insert_point_guard(self, n: Union[Block, Node]) -> _InsertPoint: ...
- def insertPoint(self) -> Node: ...
- def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ...
- def makeMultiOutputIntoTuple(self) -> None: ...
- def copy(self) -> Graph: ...
- # Defined in torch/aten/src/ATen/core/alias_info.h
- class AliasInfo:
- is_write: _bool
- before_set: Set[str]
- after_set: Set[str]
- # Defined in torch/aten/src/ATen/core/function_schema.h
- class Argument:
- name: str
- type: JitType
- default_value: Optional[Any]
- def has_default_value(self) -> _bool: ...
- kwarg_only: _bool
- is_out: _bool
- alias_info: Optional[AliasInfo]
- class FunctionSchema:
- arguments: List[Argument]
- returns: List[Argument]
- name: str
- overload_name: str
- is_mutable: _bool
class _UpgraderEntry:
    # One operator-upgrader record: the version at which the schema was bumped,
    # the upgrader's name, and the old schema string it upgrades from.
    bumped_at_version: _int
    upgrader_name: str
    old_schema: str
    def __init__(
        self,
        bumped_at_version: _int,
        upgrader_name: str,
        old_schema: str,
    ) -> None: ...
class _UpgraderRange:
    # Inclusive version window for which a given upgrader applies.
    min_version: _int
    max_version: _int
- def _get_max_operator_version() -> _int: ...
- def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ...
- def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ...
- def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ...
- def _test_only_remove_entry_to_op_version(op_name: str) -> None: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class ScriptModuleSerializer:
- def __init__(self, export_writer: PyTorchFileWriter) -> None: ...
- def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ...
- def write_files(self) -> None: ...
- def storage_context(self) -> SerializationStorageContext: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class SerializationStorageContext:
- def __init__(self) -> None: ...
- def has_storage(self, storage: Storage) -> _bool: ...
- def get_or_add_storage(self, storage: Storage) -> _int: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class DeserializationStorageContext:
- def __init__(self) -> None: ...
- def get_storage(self, name: str, dtype: _dtype) -> Tensor: ...
- def has_storage(self, name: str) -> _bool: ...
- def add_storage(self, name: str, tensor: Tensor) -> _int: ...
- # Defined in torch/csrc/jit/python/script_init.cpp
- class ConcreteModuleTypeBuilder:
- def __init__(self, obj: Any) -> None: ...
- def set_module_dict(self): ...
- def set_module_list(self): ...
- def set_parameter_list(self): ...
- def set_parameter_dict(self): ...
- def add_attribute(
- self,
- name: str,
- ty: JitType,
- is_param: _bool,
- is_buffer: _bool,
- ): ...
- def add_module(self, name: str, meta: ConcreteModuleType): ...
- def add_constant(self, name: str, value: Any): ...
- def add_overload(self, method_name: str, overloaded_method_names: List[str]): ...
- def add_builtin_function(self, name: str, symbol_name: str): ...
- def add_failed_attribute(self, name: str, failure_reason: str): ...
- def add_function_attribute(
- self,
- name: str,
- ty: JitType,
- func: Callable[..., Any],
- ): ...
- def add_ignored_attribute(self, name: str): ...
- def add_ignored_attributes(self, names: List[str]): ...
- def add_forward_hook(self, hook: Callable[..., Any]): ...
- def add_forward_pre_hook(self, pre_hook: Callable[..., Any]): ...
- class ConcreteModuleType:
- def get_constants(self) -> Dict[str, Any]: ...
- def equals(self, other: ConcreteModuleType) -> _bool: ...
- @staticmethod
- def from_jit_type(ty: JitType) -> ConcreteModuleType: ...
- class CallStack:
- def __init__(self, name: str, range: SourceRange): ...
- class ErrorReport:
- def __init__(self, range: SourceRange) -> None: ...
- def what(self) -> str: ...
- @staticmethod
- def call_stack() -> str: ...
- class CompilationUnit:
- def __init__(self, lang: str = ..., _frames_up: _int = ...) -> None: ...
- def find_function(self, name: str) -> ScriptFunction: ...
- def __getattr__(self, name: str) -> ScriptFunction: ...
- def define(
- self,
- script: str,
- rcb: ResolutionCallback = ...,
- _frames_up: _int = ...,
- ): ...
- def get_interface(self, name: str) -> InterfaceType: ...
- def get_functions(self) -> List[ScriptFunction]: ...
- def create_function(
- self,
- name: str,
- graph: Graph,
- shouldMangle: _bool = ...,
- ) -> ScriptFunction: ...
- def get_class(self, name: str) -> ClassType: ...
- class ScriptObject:
- def setattr(self, name: str, value: Any): ...
- class ScriptModule(ScriptObject):
- def _method_names(self) -> List[str]: ...
- def _get_method(self, name: str) -> ScriptMethod: ...
- class LiteScriptModule:
- def __call__(self, *input): ...
- def find_method(self, method_name: str): ...
- def forward(self, *input) -> List[str]: ...
- def run_method(self, method_name: str, *input): ...
- # NOTE: switch to collections.abc.Callable in python 3.9
- class ScriptFunction(Generic[P, ReturnVal]):
- def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ...
- def save(self, filename: str, _extra_files: Dict[str, bytes]) -> None: ...
- def save_to_buffer(self, _extra_files: Dict[str, bytes]) -> bytes: ...
- @property
- def graph(self) -> Graph: ...
- def inlined_graph(self) -> Graph: ...
- def schema(self) -> FunctionSchema: ...
- def code(self) -> str: ...
- def name(self) -> str: ...
- @property
- def qualified_name(self) -> str: ...
- # NOTE: switch to collections.abc.Callable in python 3.9
- class ScriptMethod(Generic[P, ReturnVal]):
- graph: Graph
- def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ...
- @property
- def owner(self) -> ScriptModule: ...
- @property
- def name(self) -> str: ...
class ScriptDict(Generic[K, T]):
    """Stub for a TorchScript dictionary; mirrors the mutable-mapping protocol."""

    def __init__(self, dict: Dict[K, T]) -> None: ...
    def __len__(self) -> _int: ...
    def __contains__(self, key: K) -> _bool: ...
    def __getitem__(self, key: K) -> T: ...
    def __setitem__(self, key: K, value: T) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def __iter__(self) -> Iterator[K]: ...
    # Tuple (typing) rather than builtin tuple, for consistency with the rest of the file
    # and so the annotation evaluates on pre-3.9 interpreters.
    def items(self) -> Iterator[Tuple[K, T]]: ...
    def keys(self) -> Iterator[K]: ...
- class ScriptList(Generic[T]):
- def __init__(self, list: List[T]) -> None: ...
- def __len__(self) -> _int: ...
- def __contains__(self, item: T) -> _bool: ...
- @overload
- def __getitem__(self, idx: _int) -> T: ...
- @overload
- def __getitem__(self, idx: slice) -> ScriptList[T]: ...
- @overload
- def __setitem__(self, idx: _int, value: T) -> None: ...
- @overload
- def __setitem__(self, idx: slice, value: List[T]) -> None: ...
- def __delitem__(self, idx: _int) -> None: ...
- def __iter__(self) -> Iterator[T]: ...
- def count(self, value: T) -> _int: ...
- def remove(self, value: T) -> None: ...
- def append(self, value: T) -> None: ...
- def clear(self) -> None: ...
- @overload
- def extend(self, values: List[T]) -> None: ...
- @overload
- def extend(self, values: Iterable[T]) -> None: ...
- @overload
- def pop(self) -> T: ...
- @overload
- def pop(self, idx: _int) -> T: ...
class ModuleDict:
    # Read-only view over a ScriptModule's attributes as (name, value) pairs.
    def __init__(self, mod: ScriptModule) -> None: ...
    def items(self) -> List[Tuple[str, Any]]: ...
- class ParameterDict:
- def __init__(self, mod: ScriptModule) -> None: ...
- class BufferDict:
- def __init__(self, mod: ScriptModule) -> None: ...
- # Defined in torch/csrc/jit/api/module.h
- class Module: ...
- # Defined in torch/csrc/Module.cpp
- def _initExtension(shm_manager_path: str) -> None: ... # THPModule_initExtension
- def _autograd_init() -> _bool: ... # THPAutograd_initExtension
- def _add_docstr(obj: T, doc_obj: str) -> T: ... # THPModule_addDocStr
- def _init_names(arg: Sequence[Type]) -> None: ... # THPModule_initNames
- def _has_distributed() -> _bool: ... # THPModule_hasDistributed
- def _set_default_tensor_type(type) -> None: ... # THPModule_setDefaultTensorType
- def _set_default_dtype(d: _dtype) -> None: ... # THPModule_setDefaultDtype
- def _infer_size(arg1: Size, arg2: Size) -> Size: ... # THPModule_inferSize
- def _crash_if_csrc_asan() -> _int: ... # THPModule_crashIfCsrcASAN
- def _crash_if_csrc_ubsan() -> _int: ... # THPModule_crashIfCsrcUBSAN
- def _crash_if_aten_asan() -> _int: ... # THPModule_crashIfATenASAN
- def _show_config() -> str: ... # THPModule_showConfig
- def _cxx_flags() -> str: ... # THPModule_cxxFlags
- def _parallel_info() -> str: ... # THPModule_parallelInfo
- def _get_cpu_capability() -> str: ... # THPModule_getCpuCapability
- def _set_backcompat_broadcast_warn(
- arg: _bool,
- ) -> None: ... # THPModule_setBackcompatBroadcastWarn
- def _get_backcompat_broadcast_warn() -> _bool: ... # THPModule_getBackcompatBroadcastWarn
- def _set_backcompat_keepdim_warn(
- arg: _bool,
- ) -> None: ... # THPModule_setBackcompatKeepdimWarn
- def _get_backcompat_keepdim_warn() -> _bool: ... # THPModule_getBackcompatKeepdimWarn
- def get_num_thread() -> _int: ... # THPModule_getNumThreads
- def set_num_threads(nthreads: _int) -> None: ... # THPModule_setNumThreads
- def get_num_interop_threads() -> _int: ... # THPModule_getNumInteropThreads
- def set_num_interop_threads(
- nthreads: _int,
- ) -> None: ... # THPModule_setNumInteropThreads
- def _get_cudnn_enabled() -> _bool: ... # THPModule_userEnabledCuDNN
- def _set_cudnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledCuDNN
- def _get_flash_sdp_enabled() -> _bool: ... # THPModule_userEnabledFusedSDP
- def _set_sdp_use_flash(arg: _bool) -> None: ... # THPModule_setSDPUseFlash
- def _get_mem_efficient_sdp_enabled() -> _bool: ... # THPModule_userEnabledMemEfficientSDP (was mislabeled MathSDP)
- def _set_sdp_use_mem_efficient(
- arg: _bool,
- ) -> None: ... # THPModule_setSDPUseMemEfficient
- def _get_math_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP
- def _set_sdp_use_math(arg: _bool) -> None: ... # THPModule_setSDPUseMath
- def _get_cudnn_sdp_enabled() -> _bool: ... # THPModule_userEnabledCuDNNSDP (was mislabeled MathSDP)
- def _set_sdp_use_cudnn(arg: _bool) -> None: ... # THPModule_setSDPUseCuDNN (was mislabeled SDPUseMath)
- def _get_mkldnn_enabled() -> _bool: ... # THPModule_userEnabledMkldnn
- def _set_mkldnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledMkldnn
- def _get_cudnn_benchmark() -> _bool: ... # THPModule_benchmarkCuDNN
- def _set_cudnn_benchmark(arg: _bool) -> None: ... # THPModule_setBenchmarkCuDNN
- def _get_cudnn_deterministic() -> _bool: ... # THPModule_deterministicCuDNN
- def _set_cudnn_deterministic(arg: _bool) -> None: ... # THPModule_setDeterministicCuDNN
- def _get_deterministic_algorithms() -> _bool: ... # THPModule_deterministicAlgorithms
- def _get_deterministic_algorithms_warn_only() -> _bool: ... # THPModule_deterministicAlgorithmsWarnOnly
- def _set_deterministic_algorithms(
- mode: _bool,
- *,
- warn_only: _bool = ...,
- ) -> None: ... # THPModule_setDeterministicAlgorithms
- def _get_deterministic_fill_uninitialized_memory() -> _bool: ... # THPModule_deterministicFillUninitializedMemory
- def _set_deterministic_fill_uninitialized_memory(arg: _bool) -> None: ... # THPModule_setDeterministicFillUninitializedMemory
- def _get_nnpack_enabled() -> _bool: ... # THPModule_userEnabledNNPACK
- def _set_nnpack_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledNNPACK
- def _get_warnAlways() -> _bool: ... # THPModule_warnAlways
- def _set_warnAlways(arg: _bool) -> None: ... # THPModule_setWarnAlways
- def _get_cudnn_allow_tf32() -> _bool: ... # THPModule_allowTF32CuDNN
- def _set_cudnn_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuDNN
- def _get_cublas_allow_tf32() -> _bool: ... # THPModule_allowTF32CuBLAS
- def _set_cublas_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuBLAS
- def _get_float32_matmul_precision() -> str: ... # THPModule_float32MatmulPrecision
- def _set_float32_matmul_precision(
- arg: str,
- ) -> None: ... # THPModule_setFloat32MatmulPrecision
- def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... # THPModule_allowFP16ReductionCuBLAS
- def _set_cublas_allow_fp16_reduced_precision_reduction(
- arg: _bool,
- ) -> None: ... # THPModule_setAllowFP16ReductionCuBLAS
- def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... # THPModule_allowBF16ReductionCuBLAS
- def _set_cublas_allow_bf16_reduced_precision_reduction(
- arg: _bool,
- ) -> None: ... # THPModule_setAllowBF16ReductionCuBLAS
- def _set_conj(x: Tensor, conj: _bool) -> None: ...
- def _set_neg(x: Tensor, neg: _bool) -> None: ...
- def _set_meta_in_tls_dispatch_include(meta_in_tls: _bool) -> None: ...
- def _meta_in_tls_dispatch_include() -> _bool: ...
- def _stash_obj_in_tls(key: str, arg: Any) -> None: ...
- def _get_obj_in_tls(key: str) -> Any: ...
- def _is_key_in_tls(key: str) -> _bool: ...
- def _select_batch_norm_backend(*args, **kwargs) -> BatchNormBackend: ...
- def _select_conv_backend(*args, **kwargs) -> ConvBackend: ...
- def _conv_determine_backend_memory_format(
- input: Tensor,
- weight: Tensor,
- backend: ConvBackend,
- ) -> memory_format: ...
- def _has_storage(x: Tensor) -> _bool: ...
- def _construct_storage_from_data_pointer(data_ptr: _int, device: torch.device, size: _int) -> Storage: ...
- def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ...
- def _group_tensors_by_device_and_dtype(nested_tensorlists: List[List[Optional[Tensor]]], with_indices: _bool = False) -> Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Optional[Tensor]]], List[_int]]]: ...
- # NB: There is no Capsule type in typing, see
- # https://code.activestate.com/lists/python-dev/139675/
- def _to_dlpack(data: Tensor) -> Any: ... # THPModule_toDLPack
- def _from_dlpack(data: Any) -> Tensor: ... # THPModule_fromDLPack
- def _get_cpp_backtrace(
- frames_to_skip: _int,
- maximum_number_of_frames: _int,
- ) -> str: ... # THPModule_getCppBacktrace
- def set_flush_denormal(arg: _bool) -> _bool: ... # THPModule_setFlushDenormal
- def get_default_dtype() -> _dtype: ... # THPModule_getDefaultDtype
- def _get_default_device() -> str: ... # THPModule_getDefaultDevice
- def _get_qengine() -> _int: ... # THPModule_qEngine
- def _set_qengine(qengine: _int) -> None: ... # THPModule_setQEngine
- def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines
- def _is_xnnpack_enabled() -> _bool: ... # THPModule_isEnabledXNNPACK
- def _check_sparse_tensor_invariants() -> _bool: ... # THPModule_checkSparseTensorInvariants
- def _set_check_sparse_tensor_invariants(
- arg: _bool,
- ) -> None: ... # THPModule_setCheckSparseTensorInvariants
- def _set_default_mobile_cpu_allocator() -> None: ... # THPModule_setDefaultMobileCPUAllocator
- def _unset_default_mobile_cpu_allocator() -> None: ... # THPModule_unsetDefaultMobileCPUAllocator
- def _is_torch_function_enabled() -> _bool: ... # THPModule_isEnabledTorchFunction
- def _has_torch_function(
- args: Iterable[Any],
- ) -> _bool: ... # THPModule_has_torch_function
- def _has_torch_function_unary(Any) -> _bool: ... # THPModule_has_torch_function_unary
- def _has_torch_function_variadic(
- *args: Any,
- ) -> _bool: ... # THPModule_has_torch_function_variadic
- def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting
- def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting
- def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython
- def _log_api_usage_metadata(event: str, metadata_map: Dict[str, str]) -> None: ... # LogAPIUsageMetadataFromPython
- def _demangle(str) -> str: ... # c10::demangle
- def _disabled_torch_function_impl(
- func: Callable,
- types: Iterable[Type],
- args: Tuple,
- kwargs: Dict,
- ) -> Any: ... # THPModule_disable_torch_function
- def _disabled_torch_dispatch_impl(
- func: Callable,
- types: Iterable[Type],
- args: Tuple,
- kwargs: Dict,
- ) -> Any: ... # THPModule_disable_dispatch_function
- def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ...
- def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ...
- class _LinalgBackend:
- Default: _LinalgBackend
- Cusolver: _LinalgBackend
- Magma: _LinalgBackend
- class BatchNormBackend(Enum): ...
- def _get_blas_preferred_backend() -> torch._C._BlasBackend: ...
- def _set_blas_preferred_backend(arg: torch._C._BlasBackend): ...
- class _BlasBackend:
- Cublas: _BlasBackend
- Cublaslt: _BlasBackend
- class ConvBackend(Enum): ...
class Tag(Enum):
    # Operator tags; the integer values match the declaration order below.
    core: _int = 0
    data_dependent_output: _int = 1
    dynamic_output_shape: _int = 2
    generated: _int = 3
    inplace_view: _int = 4
    needs_fixed_stride_order: _int = 5
    nondeterministic_bitwise: _int = 6
    nondeterministic_seeded: _int = 7
    pointwise: _int = 8
    pt2_compliant_tag: _int = 9
    view_copy: _int = 10
- # Defined in `valgrind.h` and `callgrind.h` respectively.
- def _valgrind_supported_platform() -> _bool: ... # NVALGRIND
- def _valgrind_toggle() -> None: ... # CALLGRIND_TOGGLE_COLLECT
- def _valgrind_toggle_and_dump_stats() -> None: ... # CALLGRIND_TOGGLE_COLLECT and CALLGRIND_DUMP_STATS
- has_openmp: _bool
- has_mkl: _bool
- _has_mps: _bool
- has_lapack: _bool
- _has_cuda: _bool
- _has_magma: _bool
- _has_xpu: _bool
- _has_mkldnn: _bool
- _has_cudnn: _bool
- has_spectral: _bool
- _GLIBCXX_USE_CXX11_ABI: _bool
- default_generator: Generator
- # Defined in torch/csrc/autograd/init.cpp
- def _set_grad_enabled(enabled: _bool) -> None: ...
- def is_grad_enabled() -> _bool: ...
- def _set_fwd_grad_enabled(enabled: _bool) -> None: ...
- def _is_fwd_grad_enabled() -> _bool: ...
- def is_inference_mode_enabled() -> _bool: ...
- @overload
- def set_autocast_enabled(device_type: str, enabled: _bool) -> None: ...
- @overload
- def set_autocast_enabled(enabled: _bool) -> None: ...
- @overload
- def is_autocast_enabled(device_type: str) -> _bool: ...
- @overload
- def is_autocast_enabled() -> _bool: ...
- def set_autocast_dtype(device_type: str, dtype: _dtype) -> None: ...
- def get_autocast_dtype(device_type: str) -> _dtype: ...
- def clear_autocast_cache() -> None: ...
- def set_autocast_cpu_enabled(enabled: _bool) -> None: ...
- def is_autocast_cpu_enabled() -> _bool: ...
- def _is_any_autocast_enabled() -> _bool: ...
- def _is_autocast_available(device_type: str) -> _bool: ...
- def set_autocast_cpu_dtype(dtype: _dtype) -> None: ...
- def set_autocast_gpu_dtype(dtype: _dtype) -> None: ...
- def get_autocast_cpu_dtype() -> _dtype: ...
- def get_autocast_gpu_dtype() -> _dtype: ...
- def autocast_increment_nesting() -> _int: ...
- def autocast_decrement_nesting() -> _int: ...
- def is_autocast_cache_enabled() -> _bool: ...
- def set_autocast_cache_enabled(enabled: _bool) -> None: ...
- def _increment_version(tensor: Tensor) -> None: ...
- def set_anomaly_enabled(enabled: _bool, check_nan: _bool = True) -> None: ...
- def is_anomaly_enabled() -> _bool: ...
- def is_anomaly_check_nan_enabled() -> _bool: ...
- def _is_multithreading_enabled() -> _bool: ...
- def _set_multithreading_enabled(enabled: _bool) -> None: ...
- def _set_view_replay_enabled(enabled: _bool) -> None: ...
- def _is_view_replay_enabled() -> _bool: ...
- def _enter_dual_level() -> _int: ...
- def _exit_dual_level(level: _int) -> None: ...
- def _make_dual(tensor: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
- def _unpack_dual(tensor: Tensor, level: _int) -> Tensor: ...
- def __set_forward_AD_enabled(enabled: _bool) -> None: ...
- def __is_forward_AD_enabled() -> _bool: ...
- def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ...
- def _reset_default_hooks() -> None: ...
# __torch_function__ / __torch_dispatch__ mode stacks (Python-mode dispatch).
def _is_torch_function_mode_enabled() -> _bool: ...
def _set_torch_function_mode(cls: Any) -> None: ...
def _push_on_torch_function_stack(cls: Any) -> None: ...
def _pop_torch_function_stack() -> Any: ...
def _get_function_stack_at(idx: _int) -> Any: ...
def _len_torch_function_stack() -> _int: ...
def _set_torch_dispatch_mode(cls: Any) -> None: ...
def _push_on_torch_dispatch_stack(cls: TorchDispatchMode) -> None: ...
# mode_key selects a specific "infra" mode slot (FAKE/PROXY/FUNCTIONAL);
# None pops the top of the user stack.
def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ...
def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ...
def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Optional[TorchDispatchMode]: ...
def _set_dispatch_mode(mode: TorchDispatchMode) -> None: ...
def _get_dispatch_stack_at(idx: _int) -> Any: ...
def _len_torch_dispatch_stack() -> _int: ...
def _activate_gpu_trace() -> None: ...
# Context-manager guards bound from C++. Each toggles a piece of
# dispatcher/autograd thread-local state for the duration of the `with`
# block; the native implementation restores the previous state on exit.
class _DisableTorchDispatch:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _EnableTorchFunction:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _EnablePythonDispatcher:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _DisablePythonDispatcher:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _EnablePreDispatch:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _DisableFuncTorch:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _DisableAutocast:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _InferenceMode:
    # enabled=False lets the same guard be used to *exit* inference mode.
    def __init__(self, enabled: _bool): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...
# Selects how the dispatcher handles ops that lack an autograd kernel
# (mode is a string name, e.g. "warn" / "nothing" — confirm in C++ source).
def _set_autograd_fallback_mode(mode: str) -> None: ...
def _get_autograd_fallback_mode() -> str: ...
# Defined in torch/csrc/jit/python/script_init.cpp
class LoggerBase: ...
class NoopLogger(LoggerBase): ...
class LockingLogger(LoggerBase): ...

class AggregationType(Enum):
    # NOTE(review): presumably how LockingLogger aggregates recorded
    # counter values — confirm against script_init.cpp.
    SUM = 0
    AVG = 1
class FileCheck:
    """Fluent string-pattern checker (LLVM-FileCheck style).

    Build up a sequence of expectations with the ``check*`` methods —
    each returns the FileCheck instance so calls can be chained — then
    call :meth:`run` to verify them against a test string.
    """

    def run(self, test_string: str) -> None: ...
    def check(self, test_string: str) -> FileCheck: ...
    def check_not(self, test_string: str) -> FileCheck: ...
    def check_same(self, test_string: str) -> FileCheck: ...
    def check_next(self, test_string: str) -> FileCheck: ...
    def check_count(
        self,
        test_string: str,
        count: _int,
        exactly: _bool = False,  # if True, require exactly `count` occurrences
    ) -> FileCheck: ...
    def check_dag(self, test_string: str) -> FileCheck: ...
    def check_source_highlighted(self, test_string: str) -> FileCheck: ...
    def check_regex(self, test_string: str) -> FileCheck: ...
# Defined in torch/csrc/jit/python/init.cpp
class PyTorchFileReader:
    """Reader for the zip-container format used by torch.save/torch.jit.save."""

    @overload
    def __init__(self, name: str) -> None: ...  # open by file path
    @overload
    def __init__(self, buffer: BinaryIO) -> None: ...  # read from a binary stream
    def get_record(self, name: str) -> bytes: ...
    def serialization_id(self) -> str: ...
class PyTorchFileWriter:
    """Writer counterpart of PyTorchFileReader for the torch archive format."""

    @overload
    def __init__(self, name: str) -> None: ...  # write to a file path
    @overload
    def __init__(self, buffer: BinaryIO) -> None: ...  # write into a binary stream
    # `data` may be a Storage, raw bytes, or an int (pointer/handle on the C++ side
    # — confirm in caffe2/serialize); `size` is the byte length of the record.
    def write_record(self, name: str, data: Union[Storage, bytes, _int], size: _int) -> None: ...
    def write_end_of_file(self) -> None: ...
    def set_min_version(self, version: _int) -> None: ...
    def get_all_written_records(self) -> List[str]: ...
    def archive_name(self) -> str: ...
    def serialization_id(self) -> str: ...
# TorchScript global options and graph passes.
def _jit_get_inline_everything_mode() -> _bool: ...
def _jit_set_inline_everything_mode(enabled: _bool) -> None: ...
def _jit_get_logging_option() -> str: ...
def _jit_set_logging_option(option: str) -> None: ...
def _jit_set_logging_stream(stream_name: str) -> None: ...
# NOTE(review): these passes use the bare type name `Graph` as the (unannotated)
# parameter name; they take a torch._C.Graph and mutate it in place.
def _jit_pass_cse(Graph) -> _bool: ...
def _jit_pass_dce(Graph) -> None: ...
def _jit_pass_lint(Graph) -> None: ...
# Defined in torch/csrc/jit/python/python_custom_class.cpp
def _get_custom_class_python_wrapper(name: str, attr: str) -> Any: ...

# Defined in torch/csrc/Module.cpp
# Rename / query the "privateuse1" out-of-tree backend device name.
def _rename_privateuse1_backend(backend: str) -> None: ...
def _get_privateuse1_backend_name() -> str: ...
# Defined in torch/csrc/Generator.cpp
class Generator:
    """Pseudo-random number generator state object bound from C++."""

    device: _device  # device this generator produces numbers for

    def __init__(self, device: Optional[DeviceLikeType] = None) -> None: ...
    # Pickle support: (class, ctor args, state tuple for __setstate__).
    def __reduce__(self) -> Tuple[Type[Generator], Tuple[_device], Tuple[_int, Optional[_int], Tensor]]: ...
    def __setstate__(self, state: Tuple[_int, Optional[_int], Tensor]) -> None: ...
    def get_state(self) -> Tensor: ...  # RNG state snapshot as a byte tensor
    def set_state(self, _new_state: Tensor) -> Generator: ...
    def clone_state(self) -> Generator: ...
    # graphsafe_* variants exchange state as Generator objects
    # (safe for CUDA-graph capture — confirm in Generator.cpp).
    def graphsafe_get_state(self) -> Generator: ...
    def graphsafe_set_state(self, _new_state: Generator) -> Generator: ...
    def set_offset(self, offset: _int) -> Generator: ...
    def get_offset(self) -> _int: ...
    # Mutators return the Generator itself for fluent chaining.
    def manual_seed(self, seed: _int) -> Generator: ...
    def seed(self) -> _int: ...
    def initial_seed(self) -> _int: ...
# Defined in torch/csrc/utils/python_dispatch.cpp
class _DispatchOperatorHandle:
    """Handle to a registered operator in the C++ dispatcher."""

    def schema(self) -> FunctionSchema: ...
    def debug(self) -> str: ...  # human-readable debug description
class _DispatchModule:
    """Python binding of a dispatcher Library; methods mirror the C++
    torch::Library registration API and return self for chaining."""

    def def_(self, schema: str, alias: str = "") -> _DispatchModule: ...
    def def_legacy(self, schema: str) -> _DispatchModule: ...
    # Test-only helpers registering trivial (t -> t) kernels.
    def def_name_t_t(
        self,
        name: str,
        dispatch: str,
        debug: str = "default_def_name_t_t",
    ) -> _DispatchModule: ...
    def def_schema_t_t(
        self,
        schema: str,
        dispatch: str,
        alias: str,
        debug: str = "default_def_schema_t_t",
    ) -> _DispatchModule: ...
    def impl_t_t(
        self,
        name: str,
        dispatch: str,
        debug: str = "impl_t_t",
    ) -> _DispatchModule: ...
    # Register `func` as the kernel for `name` under dispatch key `dispatch`.
    def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ...
    def define(self, schema: str, alias: str = "") -> _DispatchModule: ...
    def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ...
# Precomputed key sets: every key *after* the named key in dispatch order.
_after_ADInplaceOrView_keyset: DispatchKeySet
_after_autograd_keyset: DispatchKeySet

# Create a torch::Library binding; `kind` is the library kind (e.g. DEF/IMPL/
# FRAGMENT — confirm in python_dispatch.cpp), file/linenum identify the caller.
def _dispatch_library(
    kind: str,
    name: str,
    dispatch: str,
    file: str = "",
    linenum: Any = 0,
) -> _DispatchModule: ...

# Debug dumps and invariant checks for a registered operator `name`.
def _dispatch_dump(name: str) -> str: ...
def _dispatch_dump_table(name: str) -> str: ...
def _dispatch_check_invariants(name: str) -> None: ...
def _dispatch_check_all_invariants() -> None: ...
# Call an operator through the boxed calling convention.
def _dispatch_call_boxed(handle: _DispatchOperatorHandle, *args, **kwargs) -> Any: ...
def _dispatch_find_schema_or_throw(name: str, overload_name: str) -> _DispatchOperatorHandle: ...
def _dispatch_set_report_error_callback(handle: _DispatchOperatorHandle, callback: Callable) -> None: ...

# Kernel-existence queries.
def _dispatch_has_kernel(name: str) -> _bool: ...
def _dispatch_has_kernel_for_dispatch_key(
    name: str,
    dispatch: _dispatchkey,
) -> _bool: ...
def _dispatch_has_kernel_for_any_dispatch_key(
    name: str,
    dispatch_key_set: DispatchKeySet,
) -> _bool: ...
def _dispatch_kernel_for_dispatch_key_is_fallthrough(
    name: str,
    dispatch: _dispatchkey,
) -> _bool: ...
def _dispatch_has_computed_kernel_for_dispatch_key(
    name: str,
    dispatch: _dispatchkey,
) -> _bool: ...
def _dispatch_find_dangling_impls() -> List[str]: ...
def _dispatch_get_all_op_names() -> List[str]: ...

# Thread-local include/exclude sets controlling which keys are dispatched.
def _dispatch_tls_set_dispatch_key_excluded(
    dispatch: _dispatchkey,
    val: _bool,
) -> None: ...
def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ...
def _dispatch_tls_set_dispatch_key_included(
    dispatch: _dispatchkey,
    val: _bool,
) -> None: ...
def _dispatch_tls_is_dispatch_key_included(dispatch: _dispatchkey) -> _bool: ...

def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ...
# Key <-> string conversions and classification helpers.
def _dispatch_key_name(dispatch: _dispatchkey) -> str: ...
def _dispatch_key_for_device(device_type: str) -> str: ...
def _parse_dispatch_key(key: str) -> Optional[DispatchKey]: ...
def _dispatch_key_parse(dispatch: _dispatchkey) -> DispatchKey: ...
def _dispatch_num_backends() -> _int: ...
def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ...
def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ...
def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ...
def _functionalization_reapply_views_tls() -> _bool: ...
# Fake-tensor helpers: force lifted tensors onto CPU.
def _only_lift_cpu_tensors() -> _bool: ...
def _set_only_lift_cpu_tensors(value: _bool) -> None: ...
# Make mutable data_ptr() accesses on `tensor` raise / warn (deprecation aid).
def _set_throw_on_mutable_data_ptr(tensor: Tensor) -> None: ...
def _set_warn_deprecated_on_mutable_data_ptr(tensor: Tensor) -> None: ...
class DispatchKey(Enum):
    """Python mirror of the C++ c10::DispatchKey enum.

    Values are filled in by the native extension (hence ``= ...``).
    Members are grouped below roughly as: functionality keys, alias keys,
    per-backend keys, and per-backend variants (Quantized/Sparse/
    SparseCsr/NestedTensor/Autograd x backend).
    """

    Undefined: DispatchKey = ...
    FPGA: DispatchKey = ...
    MAIA: DispatchKey = ...
    Vulkan: DispatchKey = ...
    Metal: DispatchKey = ...
    MKLDNN: DispatchKey = ...
    OpenGL: DispatchKey = ...
    OpenCL: DispatchKey = ...
    IDEEP: DispatchKey = ...
    CustomRNGKeyId: DispatchKey = ...
    MkldnnCPU: DispatchKey = ...
    Sparse: DispatchKey = ...
    SparseCsr: DispatchKey = ...
    NestedTensor: DispatchKey = ...
    Dense: DispatchKey = ...
    PythonTLSSnapshot: DispatchKey = ...
    PreDispatch: DispatchKey = ...
    PythonDispatcher: DispatchKey = ...
    Python: DispatchKey = ...
    FuncTorchDynamicLayerBackMode: DispatchKey = ...
    ZeroTensor: DispatchKey = ...
    Conjugate: DispatchKey = ...
    Negative: DispatchKey = ...
    BackendSelect: DispatchKey = ...
    Named: DispatchKey = ...
    AutogradOther: DispatchKey = ...
    AutogradFunctionality: DispatchKey = ...
    AutogradNestedTensor: DispatchKey = ...
    Tracer: DispatchKey = ...
    Autocast: DispatchKey = ...
    AutocastCPU: DispatchKey = ...
    AutocastCUDA: DispatchKey = ...
    Batched: DispatchKey = ...
    VmapMode: DispatchKey = ...
    FuncTorchGradWrapper: DispatchKey = ...
    FuncTorchBatched: DispatchKey = ...
    BatchedNestedTensor: DispatchKey = ...
    FuncTorchVmapMode: DispatchKey = ...
    FuncTorchDynamicLayerFrontMode: DispatchKey = ...
    Functionalize: DispatchKey = ...
    TESTING_ONLY_GenericWrapper: DispatchKey = ...
    TESTING_ONLY_GenericMode: DispatchKey = ...
    ADInplaceOrView: DispatchKey = ...
    # Alias keys covering groups of runtime keys.
    Autograd: DispatchKey = ...
    CompositeImplicitAutograd: DispatchKey = ...
    CompositeImplicitAutogradNestedTensor: DispatchKey = ...
    CompositeExplicitAutograd: DispatchKey = ...
    CompositeExplicitAutogradNonFunctional: DispatchKey = ...
    FuncTorchBatchedDecomposition: DispatchKey = ...
    # Backend keys.
    CPU: DispatchKey = ...
    CUDA: DispatchKey = ...
    HIP: DispatchKey = ...
    XLA: DispatchKey = ...
    MTIA: DispatchKey = ...
    MPS: DispatchKey = ...
    IPU: DispatchKey = ...
    XPU: DispatchKey = ...
    HPU: DispatchKey = ...
    VE: DispatchKey = ...
    Lazy: DispatchKey = ...
    Meta: DispatchKey = ...
    PrivateUse1: DispatchKey = ...
    PrivateUse2: DispatchKey = ...
    PrivateUse3: DispatchKey = ...
    # Quantized per-backend keys.
    QuantizedCPU: DispatchKey = ...
    QuantizedCUDA: DispatchKey = ...
    QuantizedHIP: DispatchKey = ...
    QuantizedXLA: DispatchKey = ...
    QuantizedMTIA: DispatchKey = ...
    QuantizedMPS: DispatchKey = ...
    QuantizedIPU: DispatchKey = ...
    QuantizedXPU: DispatchKey = ...
    QuantizedHPU: DispatchKey = ...
    QuantizedVE: DispatchKey = ...
    QuantizedLazy: DispatchKey = ...
    QuantizedMeta: DispatchKey = ...
    QuantizedPrivateUse1: DispatchKey = ...
    QuantizedPrivateUse2: DispatchKey = ...
    QuantizedPrivateUse3: DispatchKey = ...
    # Sparse (COO) per-backend keys.
    SparseCPU: DispatchKey = ...
    SparseCUDA: DispatchKey = ...
    SparseHIP: DispatchKey = ...
    SparseXLA: DispatchKey = ...
    SparseMTIA: DispatchKey = ...
    SparseMPS: DispatchKey = ...
    SparseIPU: DispatchKey = ...
    SparseXPU: DispatchKey = ...
    SparseHPU: DispatchKey = ...
    SparseVE: DispatchKey = ...
    SparseLazy: DispatchKey = ...
    SparseMeta: DispatchKey = ...
    SparsePrivateUse1: DispatchKey = ...
    SparsePrivateUse2: DispatchKey = ...
    SparsePrivateUse3: DispatchKey = ...
    # Sparse compressed (CSR/CSC/BSR/BSC) per-backend keys.
    SparseCsrCPU: DispatchKey = ...
    SparseCsrCUDA: DispatchKey = ...
    SparseCsrHIP: DispatchKey = ...
    SparseCsrXLA: DispatchKey = ...
    SparseCsrMTIA: DispatchKey = ...
    SparseCsrMPS: DispatchKey = ...
    SparseCsrIPU: DispatchKey = ...
    SparseCsrXPU: DispatchKey = ...
    SparseCsrHPU: DispatchKey = ...
    SparseCsrVE: DispatchKey = ...
    SparseCsrLazy: DispatchKey = ...
    SparseCsrMeta: DispatchKey = ...
    SparseCsrPrivateUse1: DispatchKey = ...
    SparseCsrPrivateUse2: DispatchKey = ...
    SparseCsrPrivateUse3: DispatchKey = ...
    # NestedTensor per-backend keys.
    NestedTensorCPU: DispatchKey = ...
    NestedTensorCUDA: DispatchKey = ...
    NestedTensorHIP: DispatchKey = ...
    NestedTensorXLA: DispatchKey = ...
    NestedTensorMTIA: DispatchKey = ...
    NestedTensorMPS: DispatchKey = ...
    NestedTensorIPU: DispatchKey = ...
    NestedTensorXPU: DispatchKey = ...
    NestedTensorHPU: DispatchKey = ...
    NestedTensorVE: DispatchKey = ...
    NestedTensorLazy: DispatchKey = ...
    NestedTensorMeta: DispatchKey = ...
    NestedTensorPrivateUse1: DispatchKey = ...
    NestedTensorPrivateUse2: DispatchKey = ...
    NestedTensorPrivateUse3: DispatchKey = ...
    # Autograd per-backend keys.
    AutogradCPU: DispatchKey = ...
    AutogradCUDA: DispatchKey = ...
    AutogradHIP: DispatchKey = ...
    AutogradXLA: DispatchKey = ...
    AutogradMTIA: DispatchKey = ...
    AutogradMPS: DispatchKey = ...
    AutogradIPU: DispatchKey = ...
    AutogradXPU: DispatchKey = ...
    AutogradHPU: DispatchKey = ...
    AutogradVE: DispatchKey = ...
    AutogradLazy: DispatchKey = ...
    AutogradMeta: DispatchKey = ...
    AutogradPrivateUse1: DispatchKey = ...
    AutogradPrivateUse2: DispatchKey = ...
    AutogradPrivateUse3: DispatchKey = ...
class DispatchKeySet:
    """Immutable bitset of DispatchKeys (mirrors c10::DispatchKeySet).

    Set-algebra operators (|, -, &) and add/remove all return a new set.
    """

    def __init__(self, key: DispatchKey) -> None: ...
    def __or__(self, other: DispatchKeySet) -> DispatchKeySet: ...
    def __sub__(self, other: DispatchKeySet) -> DispatchKeySet: ...
    def __and__(self, other: DispatchKeySet) -> DispatchKeySet: ...
    def highestPriorityTypeId(self) -> DispatchKey: ...
    def has(self, k: _dispatchkey) -> _bool: ...
    def add(self, k: _dispatchkey) -> DispatchKeySet: ...
    def remove(self, k: _dispatchkey) -> DispatchKeySet: ...
    def __repr__(self) -> str: ...
# Backends whose autograd kernels fall under the AutogradOther key.
_dispatch_autogradother_backends: DispatchKeySet
_additional_keys_to_prop_for_wrapper_tensors: DispatchKeySet

def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ...
# Full key set, optionally restricted to keys after `t` in priority order.
def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ...
def _dispatch_keyset_full() -> DispatchKeySet: ...
def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ...
def _dispatch_get_backend_keyset_from_autograd(
    dispatch: _dispatchkey,
) -> DispatchKeySet: ...
def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ...  # keys carried by this tensor
# Current thread-local exclude/include sets.
def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ...
def _dispatch_tls_local_include_set() -> DispatchKeySet: ...
def _dispatch_is_included_in_alias(
    dispatch_a: _dispatchkey,
    dispatch_b: _dispatchkey,
) -> _bool: ...

# Functionalization helpers operating on tensor pairs in place.
def _propagate_xla_data(a: Tensor, b: Tensor) -> None: ...
def _replace_(a: Tensor, b: Tensor) -> None: ...
def _commit_update(a: Tensor) -> None: ...
# Context-manager guards manipulating the dispatcher's thread-local
# include/exclude key sets for the duration of a `with` block.
class _ExcludeDispatchKeyGuard:
    def __init__(self, keyset: DispatchKeySet): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _IncludeDispatchKeyGuard:
    def __init__(self, k: DispatchKey): ...  # single key, unlike the exclude guard
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _ForceDispatchKeyGuard:
    # Overwrites both TLS sets with the given include/exclude sets.
    def __init__(self, include: DispatchKeySet, exclude: DispatchKeySet): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _PreserveDispatchKeyGuard:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _AutoDispatchBelowAutograd:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...

class _AutoDispatchBelowADInplaceOrView:
    def __init__(self): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...
# List / print registrations matching a dispatch-key substring filter.
def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ...
def _dispatch_get_registrations_for_dispatch_key(
    dispatch_key: str = "",
) -> List[str]: ...
def _are_functorch_transforms_active() -> _bool: ...

# Defined in torch/csrc/autograd/init.cpp
def _set_python_dispatcher(dispatcher: object) -> None: ...

# SymInt helpers backing nested-tensor symbolic sizes.
def _get_nested_int(id: _int, coeff: _int) -> SymInt: ...
def _get_constant_bool_symnode(val: _bool) -> Any: ...
class _TorchDispatchModeKey(Enum):
    """Identifies the reserved "infra" slots in the torch_dispatch mode stack."""

    FAKE: _TorchDispatchModeKey = ...
    PROXY: _TorchDispatchModeKey = ...
    FUNCTIONAL: _TorchDispatchModeKey = ...

class _SetExcludeDispatchKeyGuard:
    """Guard setting a single key's TLS-excluded flag to `enabled` within a `with` block."""

    def __init__(self, k: DispatchKey, enabled: _bool): ...
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, traceback): ...
# Defined in torch/csrc/utils/init.cpp
class BenchmarkConfig:
    """Configuration for ThroughputBenchmark runs."""

    num_calling_threads: _int
    num_worker_threads: _int
    num_warmup_iters: _int  # iterations discarded before timing starts
    num_iters: _int
    profiler_output_path: str

class BenchmarkExecutionStats:
    """Results of a ThroughputBenchmark run."""

    latency_avg_ms: _float
    num_iters: _int

class ThroughputBenchmark:
    """Benchmarks a module under concurrent callers using stored example inputs."""

    def __init__(self, module: Any) -> None: ...
    def add_input(self, *args: Any, **kwargs: Any) -> None: ...
    def run_once(self, *args: Any, **kwargs: Any) -> Any: ...  # single sanity invocation
    def benchmark(self, config: BenchmarkConfig) -> BenchmarkExecutionStats: ...
# Defined in torch/csrc/Storage.cpp
# Base class for typed storage objects; Python 3 classes inherit from
# `object` implicitly, so the explicit `(object)` base was dropped.
class StorageBase: ...

# TODO: where are these legacy per-dtype Tensor classes defined?
# Kept for backward compatibility with the pre-0.4 `torch.FloatTensor`-style API;
# each is a Tensor subclass fixed to one dtype.
class DoubleTensor(Tensor): ...
class FloatTensor(Tensor): ...
class BFloat16Tensor(Tensor): ...
class LongTensor(Tensor): ...
class IntTensor(Tensor): ...
class ShortTensor(Tensor): ...
class HalfTensor(Tensor): ...
class CharTensor(Tensor): ...
class ByteTensor(Tensor): ...
class BoolTensor(Tensor): ...
# Defined in torch/csrc/autograd/python_engine.cpp
class _ImperativeEngine:
    """Python binding of the eager-mode autograd engine."""

    # `callback` runs after the current backward pass completes.
    def queue_callback(self, callback: Callable[[], None]) -> None: ...
    def run_backward(self, *args: Any, **kwargs: Any) -> Tuple[Tensor, ...]: ...
    def is_checkpoint_valid(self) -> _bool: ...
# Defined in torch/csrc/autograd/python_variable.cpp
# Metaclass of TensorBase/Tensor; implemented in C++.
class _TensorMeta(type): ...
- # Defined in torch/csrc/autograd/python_variable.cpp
- class TensorBase(metaclass=_TensorMeta):
- requires_grad: _bool
- retains_grad: _bool
- shape: Size
- data: Tensor
- names: List[str]
- device: _device
- dtype: _dtype
- layout: _layout
- real: Tensor
- imag: Tensor
- T: Tensor
- H: Tensor
- mT: Tensor
- mH: Tensor
- ndim: _int
- output_nr: _int
- _version: _int
- _base: Optional[Tensor]
- _cdata: _int
- grad_fn: Optional[_Node]
- _grad_fn: Any
- _grad: Optional[Tensor]
- grad: Optional[Tensor]
- _backward_hooks: Optional[Dict[_int, Callable[[Tensor], Optional[Tensor]]]]
- nbytes: _int
- itemsize: _int
- _has_symbolic_sizes_strides: _bool
- def _view_func_unsafe(
- self,
- new_base: Tensor,
- symint_visitor_fn: Optional[Callable[[_int], _int]] = None,
- tensor_visitor_fn: Optional[Callable[[Tensor], Tensor]] = None
- ):
- ...
- def __abs__(self) -> Tensor: ...
- def __add__(self, other: Any) -> Tensor: ...
- @overload
- def __and__(self, other: Tensor) -> Tensor: ...
- @overload
- def __and__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __and__(self, other: Any) -> Tensor: ...
- def __bool__(self) -> builtins.bool: ...
- def __complex__(self) -> builtins.complex: ...
- def __div__(self, other: Any) -> Tensor: ...
- def __eq__(self, other: Any) -> Tensor: ... # type: ignore[override]
- def __float__(self) -> builtins.float: ...
- def __floordiv__(self, other: Any) -> Tensor: ...
- def __ge__(self, other: Any) -> Tensor: ...
- def __getitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]]) -> Tensor: ...
- def __gt__(self, other: Any) -> Tensor: ...
- def __iadd__(self, other: Any) -> Tensor: ...
- @overload
- def __iand__(self, other: Tensor) -> Tensor: ...
- @overload
- def __iand__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __iand__(self, other: Any) -> Tensor: ...
- def __idiv__(self, other: Any) -> Tensor: ...
- def __ifloordiv__(self, other: Any) -> Tensor: ...
- @overload
- def __ilshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __ilshift__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __ilshift__(self, other: Any) -> Tensor: ...
- def __imod__(self, other: Any) -> Tensor: ...
- def __imul__(self, other: Any) -> Tensor: ...
- def __index__(self) -> builtins.int: ...
- @overload
- def __init__(self, *args: Any, device: Optional[DeviceLikeType] = None) -> None: ...
- @overload
- def __init__(self, storage: Storage) -> None: ...
- @overload
- def __init__(self, other: Tensor) -> None: ...
- @overload
- def __init__(self, size: _size, *, device: Optional[DeviceLikeType] = None) -> None: ...
- def __int__(self) -> builtins.int: ...
- def __invert__(self) -> Tensor: ...
- @overload
- def __ior__(self, other: Tensor) -> Tensor: ...
- @overload
- def __ior__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __ior__(self, other: Any) -> Tensor: ...
- @overload
- def __irshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __irshift__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __irshift__(self, other: Any) -> Tensor: ...
- def __isub__(self, other: Any) -> Tensor: ...
- @overload
- def __ixor__(self, other: Tensor) -> Tensor: ...
- @overload
- def __ixor__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __ixor__(self, other: Any) -> Tensor: ...
- def __le__(self, other: Any) -> Tensor: ...
- def __long__(self) -> builtins.int: ...
- @overload
- def __lshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __lshift__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __lshift__(self, other: Any) -> Tensor: ...
- def __lt__(self, other: Any) -> Tensor: ...
- def __matmul__(self, other: Any) -> Tensor: ...
- def __mod__(self, other: Any) -> Tensor: ...
- def __mul__(self, other: Any) -> Tensor: ...
- def __ne__(self, other: Any) -> Tensor: ... # type: ignore[override]
- def __neg__(self) -> Tensor: ...
- def __new__(cls, *args, **kwargs) -> Self: ...
- def __nonzero__(self) -> builtins.bool: ...
- @overload
- def __or__(self, other: Tensor) -> Tensor: ...
- @overload
- def __or__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __or__(self, other: Any) -> Tensor: ...
- def __pow__(self, other: Any) -> Tensor: ...
- def __radd__(self, other: Any) -> Tensor: ...
- def __rand__(self, other: Any) -> Tensor: ...
- def __rfloordiv__(self, other: Any) -> Tensor: ...
- def __rmul__(self, other: Any) -> Tensor: ...
- def __ror__(self, other: Any) -> Tensor: ...
- def __rpow__(self, other: Any) -> Tensor: ...
- @overload
- def __rshift__(self, other: Tensor) -> Tensor: ...
- @overload
- def __rshift__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __rshift__(self, other: Any) -> Tensor: ...
- def __rsub__(self, other: Any) -> Tensor: ...
- def __rtruediv__(self, other: Any) -> Tensor: ...
- def __rxor__(self, other: Any) -> Tensor: ...
- def __setitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]], val: Union[Tensor, Number]) -> None: ...
- def __sub__(self, other: Any) -> Tensor: ...
- def __truediv__(self, other: Any) -> Tensor: ...
- @overload
- def __xor__(self, other: Tensor) -> Tensor: ...
- @overload
- def __xor__(self, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __xor__(self, other: Any) -> Tensor: ...
- def _addmm_activation(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False) -> Tensor: ...
- def _autocast_to_full_precision(self, cuda_enabled: _bool, cpu_enabled: _bool) -> Tensor: ...
- def _autocast_to_reduced_precision(self, cuda_enabled: _bool, cpu_enabled: _bool, cuda_dtype: _dtype, cpu_dtype: _dtype) -> Tensor: ...
- def _coalesced_(self, coalesced: _bool) -> Tensor: ...
- def _conj(self) -> Tensor: ...
- def _conj_physical(self) -> Tensor: ...
- def _dimI(self) -> _int: ...
- def _dimV(self) -> _int: ...
- def _indices(self) -> Tensor: ...
- def _is_all_true(self) -> Tensor: ...
- def _is_any_true(self) -> Tensor: ...
- def _is_view(self) -> _bool: ...
- def _is_zerotensor(self) -> _bool: ...
- def _lazy_clone(self) -> Tensor: ...
- @staticmethod
- def _make_subclass(cls: Type[S], data: Tensor, require_grad: _bool = False, dispatch_strides: _bool = False, dispatch_device: _bool = False, device_for_backend_keys: Optional[_device] = None) -> S: ...
- def _neg_view(self) -> Tensor: ...
- def _nested_tensor_size(self) -> Tensor: ...
- def _nested_tensor_storage_offsets(self) -> Tensor: ...
- def _nested_tensor_strides(self) -> Tensor: ...
- def _nnz(self) -> _int: ...
- def _sparse_mask_projection(self, mask: Tensor, accumulate_matches: _bool = False) -> Tensor: ...
- def _to_dense(self, dtype: Optional[_dtype] = None, masked_grad: Optional[_bool] = None) -> Tensor: ...
- @overload
- def _to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor: ...
- @overload
- def _to_sparse(self, sparse_dim: _int) -> Tensor: ...
- def _to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ...
- def _to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ...
- def _to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor: ...
- def _to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor: ...
- def _values(self) -> Tensor: ...
- def abs(self) -> Tensor:
- r"""
- abs() -> Tensor
-
- See :func:`torch.abs`
- """
- ...
- def abs_(self) -> Tensor:
- r"""
- abs_() -> Tensor
-
- In-place version of :meth:`~Tensor.abs`
- """
- ...
- def absolute(self) -> Tensor:
- r"""
- absolute() -> Tensor
-
- Alias for :func:`abs`
- """
- ...
- def absolute_(self) -> Tensor:
- r"""
- absolute_() -> Tensor
-
- In-place version of :meth:`~Tensor.absolute`
- Alias for :func:`abs_`
- """
- ...
- def acos(self) -> Tensor:
- r"""
- acos() -> Tensor
-
- See :func:`torch.acos`
- """
- ...
- def acos_(self) -> Tensor:
- r"""
- acos_() -> Tensor
-
- In-place version of :meth:`~Tensor.acos`
- """
- ...
- def acosh(self) -> Tensor:
- r"""
- acosh() -> Tensor
-
- See :func:`torch.acosh`
- """
- ...
- def acosh_(self) -> Tensor:
- r"""
- acosh_() -> Tensor
-
- In-place version of :meth:`~Tensor.acosh`
- """
- ...
- def add(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- add(other, *, alpha=1) -> Tensor
-
- Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
- and :attr:`other` are specified, each element of :attr:`other` is scaled by
- :attr:`alpha` before being used.
-
- When :attr:`other` is a tensor, the shape of :attr:`other` must be
- :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
- tensor
-
- See :func:`torch.add`
- """
- ...
- def add_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1) -> Tensor:
- r"""
- add_(other, *, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.add`
- """
- ...
- def addbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
-
- See :func:`torch.addbmm`
- """
- ...
- def addbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.addbmm`
- """
- ...
- def addcdiv(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addcdiv(tensor1, tensor2, *, value=1) -> Tensor
-
- See :func:`torch.addcdiv`
- """
- ...
- def addcdiv_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addcdiv_(tensor1, tensor2, *, value=1) -> Tensor
-
- In-place version of :meth:`~Tensor.addcdiv`
- """
- ...
- def addcmul(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addcmul(tensor1, tensor2, *, value=1) -> Tensor
-
- See :func:`torch.addcmul`
- """
- ...
- def addcmul_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addcmul_(tensor1, tensor2, *, value=1) -> Tensor
-
- In-place version of :meth:`~Tensor.addcmul`
- """
- ...
- def addmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
-
- See :func:`torch.addmm`
- """
- ...
- def addmm_(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.addmm`
- """
- ...
- def addmv(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addmv(mat, vec, *, beta=1, alpha=1) -> Tensor
-
- See :func:`torch.addmv`
- """
- ...
- def addmv_(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.addmv`
- """
- ...
- def addr(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor
-
- See :func:`torch.addr`
- """
- ...
- def addr_(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.addr`
- """
- ...
- def adjoint(self) -> Tensor:
- r"""
- adjoint() -> Tensor
-
- Alias for :func:`adjoint`
- """
- ...
- def align_as(self, other: Tensor) -> Tensor:
- r"""
- align_as(other) -> Tensor
-
- Permutes the dimensions of the :attr:`self` tensor to match the dimension order
- in the :attr:`other` tensor, adding size-one dims for any new names.
-
- This operation is useful for explicit broadcasting by names (see examples).
-
- All of the dims of :attr:`self` must be named in order to use this method.
- The resulting tensor is a view on the original tensor.
-
- All dimension names of :attr:`self` must be present in ``other.names``.
- :attr:`other` may contain named dimensions that are not in ``self.names``;
- the output tensor has a size-one dimension for each of those new names.
-
- To align a tensor to a specific order, use :meth:`~Tensor.align_to`.
-
- Examples::
-
- # Example 1: Applying a mask
- >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
- >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
- >>> imgs.masked_fill_(mask.align_as(imgs), 0)
-
-
- # Example 2: Applying a per-channel-scale
- >>> def scale_channels(input, scale):
- >>> scale = scale.refine_names('C')
- >>> return input * scale.align_as(input)
-
- >>> num_channels = 3
- >>> scale = torch.randn(num_channels, names=('C',))
- >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
- >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
- >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))
-
- # scale_channels is agnostic to the dimension order of the input
- >>> scale_channels(imgs, scale)
- >>> scale_channels(more_imgs, scale)
- >>> scale_channels(videos, scale)
-
- .. warning::
- The named tensor API is experimental and subject to change.
- """
- ...
- @overload
- def align_to(self, order: Sequence[Union[str, ellipsis, None]], ellipsis_idx: _int) -> Tensor: ...
- @overload
- def align_to(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
- @overload
- def all(self) -> Tensor:
- r"""
- all(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.all`
- """
- ...
- @overload
- def all(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor:
- r"""
- all(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.all`
- """
- ...
- @overload
- def all(self, dim: _int, keepdim: _bool = False) -> Tensor:
- r"""
- all(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.all`
- """
- ...
- @overload
- def all(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor:
- r"""
- all(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.all`
- """
- ...
- def allclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool:
- r"""
- allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
-
- See :func:`torch.allclose`
- """
- ...
- def amax(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor:
- r"""
- amax(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.amax`
- """
- ...
- def amin(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor:
- r"""
- amin(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.amin`
- """
- ...
- def aminmax(self, *, dim: Optional[_int] = None, keepdim: _bool = False) -> torch.return_types.aminmax:
- r"""
- aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
-
- See :func:`torch.aminmax`
- """
- ...
- def angle(self) -> Tensor:
- r"""
- angle() -> Tensor
-
- See :func:`torch.angle`
- """
- ...
- @overload
- def any(self) -> Tensor:
- r"""
- any(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.any`
- """
- ...
- @overload
- def any(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor:
- r"""
- any(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.any`
- """
- ...
- @overload
- def any(self, dim: _int, keepdim: _bool = False) -> Tensor:
- r"""
- any(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.any`
- """
- ...
- @overload
- def any(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor:
- r"""
- any(dim=None, keepdim=False) -> Tensor
-
- See :func:`torch.any`
- """
- ...
- def apply_(self, callable: Callable) -> Tensor:
- r"""
- apply_(callable) -> Tensor
-
- Applies the function :attr:`callable` to each element in the tensor, replacing
- each element with the value returned by :attr:`callable`.
-
- .. note::
-
- This function only works with CPU tensors and should not be used in code
- sections that require high performance.
- """
- ...
- def arccos(self) -> Tensor:
- r"""
- arccos() -> Tensor
-
- See :func:`torch.arccos`
- """
- ...
- def arccos_(self) -> Tensor:
- r"""
- arccos_() -> Tensor
-
- In-place version of :meth:`~Tensor.arccos`
- """
- ...
- def arccosh(self) -> Tensor:
- r"""
- arccosh() -> Tensor
-
- See :func:`torch.arccosh`
- """
- ...
- def arccosh_(self) -> Tensor:
- r"""
- arccosh_() -> Tensor
-
- In-place version of :meth:`~Tensor.arccosh`
- """
- ...
- def arcsin(self) -> Tensor:
- r"""
- arcsin() -> Tensor
-
- See :func:`torch.arcsin`
- """
- ...
- def arcsin_(self) -> Tensor:
- r"""
- arcsin_() -> Tensor
-
- In-place version of :meth:`~Tensor.arcsin`
- """
- ...
- def arcsinh(self) -> Tensor:
- r"""
- arcsinh() -> Tensor
-
- See :func:`torch.arcsinh`
- """
- ...
- def arcsinh_(self) -> Tensor:
- r"""
- arcsinh_() -> Tensor
-
- In-place version of :meth:`~Tensor.arcsinh`
- """
- ...
- def arctan(self) -> Tensor:
- r"""
- arctan() -> Tensor
-
- See :func:`torch.arctan`
- """
- ...
- def arctan2(self, other: Tensor) -> Tensor:
- r"""
- arctan2(other) -> Tensor
-
- See :func:`torch.arctan2`
- """
- ...
- def arctan2_(self, other: Tensor) -> Tensor:
- r"""
- arctan2_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.arctan2`
- """
- ...
- def arctan_(self) -> Tensor:
- r"""
- arctan_() -> Tensor
-
- In-place version of :meth:`~Tensor.arctan`
- """
- ...
- def arctanh(self) -> Tensor:
- r"""
- arctanh() -> Tensor
-
- See :func:`torch.arctanh`
- """
- ...
- def arctanh_(self) -> Tensor:
- r"""
- arctanh_() -> Tensor
-
- In-place version of :meth:`~Tensor.arctanh`
- """
- ...
- def argmax(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor:
- r"""
- argmax(dim=None, keepdim=False) -> LongTensor
-
- See :func:`torch.argmax`
- """
- ...
- def argmin(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor:
- r"""
- argmin(dim=None, keepdim=False) -> LongTensor
-
- See :func:`torch.argmin`
- """
- ...
- @overload
- def argsort(self, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor:
- r"""
- argsort(dim=-1, descending=False) -> LongTensor
-
- See :func:`torch.argsort`
- """
- ...
- @overload
- def argsort(self, dim: _int = -1, descending: _bool = False) -> Tensor:
- r"""
- argsort(dim=-1, descending=False) -> LongTensor
-
- See :func:`torch.argsort`
- """
- ...
- @overload
- def argsort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor:
- r"""
- argsort(dim=-1, descending=False) -> LongTensor
-
- See :func:`torch.argsort`
- """
- ...
- def argwhere(self) -> Tensor:
- r"""
- argwhere() -> Tensor
-
- See :func:`torch.argwhere`
- """
- ...
- def as_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- as_strided(size, stride, storage_offset=None) -> Tensor
-
- See :func:`torch.as_strided`
- """
- ...
- def as_strided_(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- as_strided_(size, stride, storage_offset=None) -> Tensor
-
- In-place version of :meth:`~Tensor.as_strided`
- """
- ...
- def as_strided_scatter(self, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor
-
- See :func:`torch.as_strided_scatter`
- """
- ...
- def as_subclass(self, cls: Type[S]) -> S:
- r"""
- as_subclass(cls) -> Tensor
-
- Makes a ``cls`` instance with the same data pointer as ``self``. Changes
- in the output mirror changes in ``self``, and the output stays attached
- to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
- """
- ...
- def asin(self) -> Tensor:
- r"""
- asin() -> Tensor
-
- See :func:`torch.asin`
- """
- ...
- def asin_(self) -> Tensor:
- r"""
- asin_() -> Tensor
-
- In-place version of :meth:`~Tensor.asin`
- """
- ...
- def asinh(self) -> Tensor:
- r"""
- asinh() -> Tensor
-
- See :func:`torch.asinh`
- """
- ...
- def asinh_(self) -> Tensor:
- r"""
- asinh_() -> Tensor
-
- In-place version of :meth:`~Tensor.asinh`
- """
- ...
- def atan(self) -> Tensor:
- r"""
- atan() -> Tensor
-
- See :func:`torch.atan`
- """
- ...
- def atan2(self, other: Tensor) -> Tensor:
- r"""
- atan2(other) -> Tensor
-
- See :func:`torch.atan2`
- """
- ...
- def atan2_(self, other: Tensor) -> Tensor:
- r"""
- atan2_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.atan2`
- """
- ...
- def atan_(self) -> Tensor:
- r"""
- atan_() -> Tensor
-
- In-place version of :meth:`~Tensor.atan`
- """
- ...
- def atanh(self) -> Tensor:
- r"""
- atanh() -> Tensor
-
- See :func:`torch.atanh`
- """
- ...
- def atanh_(self) -> Tensor:
- r"""
- atanh_() -> Tensor
-
- In-place version of :meth:`~Tensor.atanh`
- """
- ...
- def baddbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
-
- See :func:`torch.baddbmm`
- """
- ...
- def baddbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.baddbmm`
- """
- ...
- @overload
- def bernoulli(self, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- bernoulli(*, generator=None) -> Tensor
-
- Returns a result tensor where each :math:`\texttt{result[i]}` is independently
- sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
- floating point ``dtype``, and the result will have the same ``dtype``.
-
- See :func:`torch.bernoulli`
- """
- ...
- @overload
- def bernoulli(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- bernoulli(p, *, generator=None) -> Tensor
-
- Returns a result tensor of the same shape as :attr:`self` where each
- :math:`\texttt{result[i]}` is independently sampled from
- :math:`\text{Bernoulli}(\texttt{p})`, i.e. the scalar probability :attr:`p`
- is used for every element.
-
- See :func:`torch.bernoulli`
- """
- ...
- @overload
- def bernoulli_(self, p: Tensor, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- bernoulli_(p=0.5, *, generator=None) -> Tensor
-
- Fills each location of :attr:`self` with an independent sample from
- :math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
- ``dtype``.
-
- :attr:`p` should either be a scalar or tensor containing probabilities to be
- used for drawing the binary random number.
-
- If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
- will be set to a value sampled from
- :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
- floating point ``dtype``.
-
- See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
- """
- ...
- @overload
- def bernoulli_(self, p: _float = 0.5, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- bernoulli_(p=0.5, *, generator=None) -> Tensor
-
- Fills each location of :attr:`self` with an independent sample from
- :math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
- ``dtype``.
-
- :attr:`p` should either be a scalar or tensor containing probabilities to be
- used for drawing the binary random number.
-
- If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
- will be set to a value sampled from
- :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
- floating point ``dtype``.
-
- See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
- """
- ...
- def bfloat16(self) -> Tensor:
- r"""
- bfloat16(memory_format=torch.preserve_format) -> Tensor
- ``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def bincount(self, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor:
- r"""
- bincount(weights=None, minlength=0) -> Tensor
-
- See :func:`torch.bincount`
- """
- ...
- @overload
- def bitwise_and(self, other: Tensor) -> Tensor:
- r"""
- bitwise_and() -> Tensor
-
- See :func:`torch.bitwise_and`
- """
- ...
- @overload
- def bitwise_and(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_and() -> Tensor
-
- See :func:`torch.bitwise_and`
- """
- ...
- @overload
- def bitwise_and_(self, other: Tensor) -> Tensor:
- r"""
- bitwise_and_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_and`
- """
- ...
- @overload
- def bitwise_and_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_and_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_and`
- """
- ...
- @overload
- def bitwise_left_shift(self, other: Tensor) -> Tensor:
- r"""
- bitwise_left_shift(other) -> Tensor
-
- See :func:`torch.bitwise_left_shift`
- """
- ...
- @overload
- def bitwise_left_shift(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_left_shift(other) -> Tensor
-
- See :func:`torch.bitwise_left_shift`
- """
- ...
- @overload
- def bitwise_left_shift_(self, other: Tensor) -> Tensor:
- r"""
- bitwise_left_shift_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_left_shift`
- """
- ...
- @overload
- def bitwise_left_shift_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_left_shift_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_left_shift`
- """
- ...
- def bitwise_not(self) -> Tensor:
- r"""
- bitwise_not() -> Tensor
-
- See :func:`torch.bitwise_not`
- """
- ...
- def bitwise_not_(self) -> Tensor:
- r"""
- bitwise_not_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_not`
- """
- ...
- @overload
- def bitwise_or(self, other: Tensor) -> Tensor:
- r"""
- bitwise_or() -> Tensor
-
- See :func:`torch.bitwise_or`
- """
- ...
- @overload
- def bitwise_or(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_or() -> Tensor
-
- See :func:`torch.bitwise_or`
- """
- ...
- @overload
- def bitwise_or_(self, other: Tensor) -> Tensor:
- r"""
- bitwise_or_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_or`
- """
- ...
- @overload
- def bitwise_or_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_or_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_or`
- """
- ...
- @overload
- def bitwise_right_shift(self, other: Tensor) -> Tensor:
- r"""
- bitwise_right_shift(other) -> Tensor
-
- See :func:`torch.bitwise_right_shift`
- """
- ...
- @overload
- def bitwise_right_shift(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_right_shift(other) -> Tensor
-
- See :func:`torch.bitwise_right_shift`
- """
- ...
- @overload
- def bitwise_right_shift_(self, other: Tensor) -> Tensor:
- r"""
- bitwise_right_shift_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_right_shift`
- """
- ...
- @overload
- def bitwise_right_shift_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_right_shift_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_right_shift`
- """
- ...
- @overload
- def bitwise_xor(self, other: Tensor) -> Tensor:
- r"""
- bitwise_xor() -> Tensor
-
- See :func:`torch.bitwise_xor`
- """
- ...
- @overload
- def bitwise_xor(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_xor() -> Tensor
-
- See :func:`torch.bitwise_xor`
- """
- ...
- @overload
- def bitwise_xor_(self, other: Tensor) -> Tensor:
- r"""
- bitwise_xor_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_xor`
- """
- ...
- @overload
- def bitwise_xor_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- bitwise_xor_() -> Tensor
-
- In-place version of :meth:`~Tensor.bitwise_xor`
- """
- ...
- def bmm(self, mat2: Tensor) -> Tensor:
- r"""
- bmm(batch2) -> Tensor
-
- See :func:`torch.bmm`
- """
- ...
- def bool(self) -> Tensor:
- r"""
- bool(memory_format=torch.preserve_format) -> Tensor
-
- ``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- @overload
- def broadcast_to(self, size: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- broadcast_to(shape) -> Tensor
-
- See :func:`torch.broadcast_to`.
- """
- ...
- @overload
- def broadcast_to(self, *size: _int) -> Tensor:
- r"""
- broadcast_to(shape) -> Tensor
-
- See :func:`torch.broadcast_to`.
- """
- ...
- def byte(self) -> Tensor:
- r"""
- byte(memory_format=torch.preserve_format) -> Tensor
-
- ``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def cauchy_(self, median: _float = 0, sigma: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
-
- Fills the tensor with numbers drawn from the Cauchy distribution:
-
- .. math::
-
- f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
-
- .. note::
- Sigma (:math:`\sigma`) is used to denote the scale parameter in Cauchy distribution.
- """
- ...
- def ccol_indices(self) -> Tensor: ...
- def ceil(self) -> Tensor:
- r"""
- ceil() -> Tensor
-
- See :func:`torch.ceil`
- """
- ...
- def ceil_(self) -> Tensor:
- r"""
- ceil_() -> Tensor
-
- In-place version of :meth:`~Tensor.ceil`
- """
- ...
- def chalf(self, *, memory_format: Optional[memory_format] = None) -> Tensor:
- r"""
- chalf(memory_format=torch.preserve_format) -> Tensor
-
- ``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def char(self) -> Tensor:
- r"""
- char(memory_format=torch.preserve_format) -> Tensor
-
- ``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def cholesky(self, upper: _bool = False) -> Tensor:
- r"""
- cholesky(upper=False) -> Tensor
-
- See :func:`torch.cholesky`
- """
- ...
- def cholesky_inverse(self, upper: _bool = False) -> Tensor:
- r"""
- cholesky_inverse(upper=False) -> Tensor
-
- See :func:`torch.cholesky_inverse`
- """
- ...
- def cholesky_solve(self, input2: Tensor, upper: _bool = False) -> Tensor:
- r"""
- cholesky_solve(input2, upper=False) -> Tensor
-
- See :func:`torch.cholesky_solve`
- """
- ...
- def chunk(self, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- chunk(chunks, dim=0) -> List of Tensors
-
- See :func:`torch.chunk`
- """
- ...
- @overload
- def clamp(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
- r"""
- clamp(min=None, max=None) -> Tensor
-
- See :func:`torch.clamp`
- """
- ...
- @overload
- def clamp(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
- r"""
- clamp(min=None, max=None) -> Tensor
-
- See :func:`torch.clamp`
- """
- ...
- @overload
- def clamp_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
- r"""
- clamp_(min=None, max=None) -> Tensor
-
- In-place version of :meth:`~Tensor.clamp`
- """
- ...
- @overload
- def clamp_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
- r"""
- clamp_(min=None, max=None) -> Tensor
-
- In-place version of :meth:`~Tensor.clamp`
- """
- ...
- @overload
- def clamp_max(self, max: Tensor) -> Tensor: ...
- @overload
- def clamp_max(self, max: Union[Number, _complex]) -> Tensor: ...
- @overload
- def clamp_max_(self, max: Tensor) -> Tensor: ...
- @overload
- def clamp_max_(self, max: Union[Number, _complex]) -> Tensor: ...
- @overload
- def clamp_min(self, min: Tensor) -> Tensor: ...
- @overload
- def clamp_min(self, min: Union[Number, _complex]) -> Tensor: ...
- @overload
- def clamp_min_(self, min: Tensor) -> Tensor: ...
- @overload
- def clamp_min_(self, min: Union[Number, _complex]) -> Tensor: ...
- @overload
- def clip(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
- r"""
- clip(min=None, max=None) -> Tensor
-
- Alias for :meth:`~Tensor.clamp`.
- """
- ...
- @overload
- def clip(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
- r"""
- clip(min=None, max=None) -> Tensor
-
- Alias for :meth:`~Tensor.clamp`.
- """
- ...
- @overload
- def clip_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor:
- r"""
- clip_(min=None, max=None) -> Tensor
-
- Alias for :meth:`~Tensor.clamp_`.
- """
- ...
- @overload
- def clip_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor:
- r"""
- clip_(min=None, max=None) -> Tensor
-
- Alias for :meth:`~Tensor.clamp_`.
- """
- ...
- def clone(self, *, memory_format: Optional[memory_format] = None) -> Tensor:
- r"""
- clone(*, memory_format=torch.preserve_format) -> Tensor
-
- See :func:`torch.clone`
- """
- ...
- def coalesce(self) -> Tensor:
- r"""
- coalesce() -> Tensor
-
- Returns a coalesced copy of :attr:`self` if :attr:`self` is an
- :ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
-
- Returns :attr:`self` if :attr:`self` is a coalesced tensor.
-
- .. warning::
- Throws an error if :attr:`self` is not a sparse COO tensor.
- """
- ...
- def col_indices(self) -> Tensor:
- r"""
- col_indices() -> IntTensor
-
- Returns the tensor containing the column indices of the :attr:`self`
- tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
- The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
- and of type ``int32`` or ``int64``. When using MKL routines such as sparse
- matrix multiplication, it is necessary to use ``int32`` indexing in order
- to avoid downcasting and potentially losing information.
-
- Example::
- >>> csr = torch.eye(5,5).to_sparse_csr()
- >>> csr.col_indices()
- tensor([0, 1, 2, 3, 4], dtype=torch.int32)
- """
- ...
- def conj(self) -> Tensor:
- r"""
- conj() -> Tensor
-
- See :func:`torch.conj`
- """
- ...
- def conj_physical(self) -> Tensor:
- r"""
- conj_physical() -> Tensor
-
- See :func:`torch.conj_physical`
- """
- ...
- def conj_physical_(self) -> Tensor:
- r"""
- conj_physical_() -> Tensor
-
- In-place version of :meth:`~Tensor.conj_physical`
- """
- ...
- def contiguous(self, memory_format=torch.contiguous_format) -> Tensor:
- r"""
- contiguous(memory_format=torch.contiguous_format) -> Tensor
-
- Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
- :attr:`self` tensor is already in the specified memory format, this function returns the
- :attr:`self` tensor.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.contiguous_format``.
- """
- ...
- def copy_(self, src: Tensor, non_blocking: _bool = False) -> Tensor:
- r"""
- copy_(src, non_blocking=False) -> Tensor
-
- Copies the elements from :attr:`src` into :attr:`self` tensor and returns
- :attr:`self`.
-
- The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
- with the :attr:`self` tensor. It may be of a different data type or reside on a
- different device.
-
- Args:
- src (Tensor): the source tensor to copy from
- non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
- the copy may occur asynchronously with respect to the host. For other
- cases, this argument has no effect.
- """
- ...
- @overload
- def copysign(self, other: Tensor) -> Tensor:
- r"""
- copysign(other) -> Tensor
-
- See :func:`torch.copysign`
- """
- ...
- @overload
- def copysign(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- copysign(other) -> Tensor
-
- See :func:`torch.copysign`
- """
- ...
- @overload
- def copysign_(self, other: Tensor) -> Tensor:
- r"""
- copysign_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.copysign`
- """
- ...
- @overload
- def copysign_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- copysign_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.copysign`
- """
- ...
- def corrcoef(self) -> Tensor:
- r"""
- corrcoef() -> Tensor
-
- See :func:`torch.corrcoef`
- """
- ...
- def cos(self) -> Tensor:
- r"""
- cos() -> Tensor
-
- See :func:`torch.cos`
- """
- ...
- def cos_(self) -> Tensor:
- r"""
- cos_() -> Tensor
-
- In-place version of :meth:`~Tensor.cos`
- """
- ...
- def cosh(self) -> Tensor:
- r"""
- cosh() -> Tensor
-
- See :func:`torch.cosh`
- """
- ...
- def cosh_(self) -> Tensor:
- r"""
- cosh_() -> Tensor
-
- In-place version of :meth:`~Tensor.cosh`
- """
- ...
- @overload
- def count_nonzero(self, dim: Optional[_int] = None) -> Tensor:
- r"""
- count_nonzero(dim=None) -> Tensor
-
- See :func:`torch.count_nonzero`
- """
- ...
- @overload
- def count_nonzero(self, dim: _size) -> Tensor:
- r"""
- count_nonzero(dim=None) -> Tensor
-
- See :func:`torch.count_nonzero`
- """
- ...
- @overload
- def count_nonzero(self, *dim: _int) -> Tensor:
- r"""
- count_nonzero(dim=None) -> Tensor
-
- See :func:`torch.count_nonzero`
- """
- ...
- def cov(self, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor:
- r"""
- cov(*, correction=1, fweights=None, aweights=None) -> Tensor
-
- See :func:`torch.cov`
- """
- ...
- def cpu(self, memory_format: torch.memory_format = torch.preserve_format) -> Tensor:
- r"""
- cpu(memory_format=torch.preserve_format) -> Tensor
-
- Returns a copy of this object in CPU memory.
-
- If this object is already in CPU memory and on the correct device,
- then no copy is performed and the original object is returned.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def cross(self, other: Tensor, dim: Optional[_int] = None) -> Tensor:
- r"""
- cross(other, dim=None) -> Tensor
-
- See :func:`torch.cross`
- """
- ...
- def crow_indices(self) -> Tensor:
- r"""
- crow_indices() -> IntTensor
-
- Returns the tensor containing the compressed row indices of the :attr:`self`
- tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
- The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
- and of type ``int32`` or ``int64``. When using MKL routines such as sparse
- matrix multiplication, it is necessary to use ``int32`` indexing in order
- to avoid downcasting and potentially losing information.
-
- Example::
- >>> csr = torch.eye(5,5).to_sparse_csr()
- >>> csr.crow_indices()
- tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
- """
- ...
- def cuda(self, device: Optional[Union[_device, _int, str]] = None, non_blocking: _bool = False, memory_format: torch.memory_format = torch.preserve_format) -> Tensor:
- r"""
- cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a copy of this object in CUDA memory.
-
- If this object is already in CUDA memory and on the correct device,
- then no copy is performed and the original object is returned.
-
- Args:
- device (:class:`torch.device`): The destination GPU device.
- Defaults to the current CUDA device.
- non_blocking (bool): If ``True`` and the source is in pinned memory,
- the copy will be asynchronous with respect to the host.
- Otherwise, the argument has no effect. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- @overload
- def cummax(self, dim: _int) -> torch.return_types.cummax:
- r"""
- cummax(dim) -> (Tensor, Tensor)
-
- See :func:`torch.cummax`
- """
- ...
- @overload
- def cummax(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummax:
- r"""
- cummax(dim) -> (Tensor, Tensor)
-
- See :func:`torch.cummax`
- """
- ...
- @overload
- def cummin(self, dim: _int) -> torch.return_types.cummin:
- r"""
- cummin(dim) -> (Tensor, Tensor)
-
- See :func:`torch.cummin`
- """
- ...
- @overload
- def cummin(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummin:
- r"""
- cummin(dim) -> (Tensor, Tensor)
-
- See :func:`torch.cummin`
- """
- ...
- @overload
- def cumprod(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumprod(dim, dtype=None) -> Tensor
-
- See :func:`torch.cumprod`
- """
- ...
- @overload
- def cumprod(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumprod(dim, dtype=None) -> Tensor
-
- See :func:`torch.cumprod`
- """
- ...
- @overload
- def cumprod_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumprod_(dim, dtype=None) -> Tensor
-
- In-place version of :meth:`~Tensor.cumprod`
- """
- ...
- @overload
- def cumprod_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumprod_(dim, dtype=None) -> Tensor
-
- In-place version of :meth:`~Tensor.cumprod`
- """
- ...
- @overload
- def cumsum(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumsum(dim, dtype=None) -> Tensor
-
- See :func:`torch.cumsum`
- """
- ...
- @overload
- def cumsum(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumsum(dim, dtype=None) -> Tensor
-
- See :func:`torch.cumsum`
- """
- ...
- @overload
- def cumsum_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumsum_(dim, dtype=None) -> Tensor
-
- In-place version of :meth:`~Tensor.cumsum`
- """
- ...
- @overload
- def cumsum_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- cumsum_(dim, dtype=None) -> Tensor
-
- In-place version of :meth:`~Tensor.cumsum`
- """
- ...
- def data_ptr(self) -> _int:
- r"""
- data_ptr() -> int
-
- Returns the address of the first element of :attr:`self` tensor.
- """
- ...
- def deg2rad(self) -> Tensor:
- r"""
- deg2rad() -> Tensor
-
- See :func:`torch.deg2rad`
- """
- ...
- def deg2rad_(self) -> Tensor:
- r"""
- deg2rad_() -> Tensor
-
- In-place version of :meth:`~Tensor.deg2rad`
- """
- ...
- def dense_dim(self) -> _int:
- r"""
- dense_dim() -> int
-
- Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
-
- .. note::
- Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.
-
- See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
- """
- ...
- def dequantize(self) -> Tensor:
- r"""
- dequantize() -> Tensor
-
- Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
- """
- ...
- def det(self) -> Tensor:
- r"""
- det() -> Tensor
-
- See :func:`torch.det`
- """
- ...
- def detach(self) -> Tensor: ...
- def detach_(self) -> Tensor: ...
- def diag(self, diagonal: _int = 0) -> Tensor:
- r"""
- diag(diagonal=0) -> Tensor
-
- See :func:`torch.diag`
- """
- ...
- def diag_embed(self, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor:
- r"""
- diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
-
- See :func:`torch.diag_embed`
- """
- ...
- def diagflat(self, offset: _int = 0) -> Tensor:
- r"""
- diagflat(offset=0) -> Tensor
-
- See :func:`torch.diagflat`
- """
- ...
- @overload
- def diagonal(self, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor:
- r"""
- diagonal(offset=0, dim1=0, dim2=1) -> Tensor
-
- See :func:`torch.diagonal`
- """
- ...
- @overload
- def diagonal(self, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
- r"""
- diagonal(offset=0, dim1=0, dim2=1) -> Tensor
-
- See :func:`torch.diagonal`
- """
- ...
- def diagonal_scatter(self, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
- r"""
- diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
-
- See :func:`torch.diagonal_scatter`
- """
- ...
- def diff(self, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None) -> Tensor:
- r"""
- diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
-
- See :func:`torch.diff`
- """
- ...
- def digamma(self) -> Tensor:
- r"""
- digamma() -> Tensor
-
- See :func:`torch.digamma`
- """
- ...
- def digamma_(self) -> Tensor:
- r"""
- digamma_() -> Tensor
-
- In-place version of :meth:`~Tensor.digamma`
- """
- ...
- def dim(self) -> _int:
- r"""
- dim() -> int
-
- Returns the number of dimensions of :attr:`self` tensor.
- """
- ...
- def dist(self, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor:
- r"""
- dist(other, p=2) -> Tensor
-
- See :func:`torch.dist`
- """
- ...
- def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor:
- r"""
- div(value, *, rounding_mode=None) -> Tensor
-
- See :func:`torch.div`
- """
- ...
- def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor:
- r"""
- div_(value, *, rounding_mode=None) -> Tensor
-
- In-place version of :meth:`~Tensor.div`
- """
- ...
- @overload
- def divide(self, other: Tensor) -> Tensor:
- r"""
- divide(value, *, rounding_mode=None) -> Tensor
-
- See :func:`torch.divide`
- """
- ...
- @overload
- def divide(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor:
- r"""
- divide(value, *, rounding_mode=None) -> Tensor
-
- See :func:`torch.divide`
- """
- ...
- @overload
- def divide(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
- r"""
- divide(value, *, rounding_mode=None) -> Tensor
-
- See :func:`torch.divide`
- """
- ...
- @overload
- def divide(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- divide(value, *, rounding_mode=None) -> Tensor
-
- See :func:`torch.divide`
- """
- ...
- @overload
- def divide_(self, other: Tensor) -> Tensor:
- r"""
- divide_(value, *, rounding_mode=None) -> Tensor
-
- In-place version of :meth:`~Tensor.divide`
- """
- ...
- @overload
- def divide_(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor:
- r"""
- divide_(value, *, rounding_mode=None) -> Tensor
-
- In-place version of :meth:`~Tensor.divide`
- """
- ...
- @overload
- def divide_(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
- r"""
- divide_(value, *, rounding_mode=None) -> Tensor
-
- In-place version of :meth:`~Tensor.divide`
- """
- ...
- @overload
- def divide_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- divide_(value, *, rounding_mode=None) -> Tensor
-
- In-place version of :meth:`~Tensor.divide`
- """
- ...
- def dot(self, tensor: Tensor) -> Tensor:
- r"""
- dot(other) -> Tensor
-
- See :func:`torch.dot`
- """
- ...
- def double(self) -> Tensor:
- r"""
- double(memory_format=torch.preserve_format) -> Tensor
-
- ``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- @overload
- def dsplit(self, sections: _int) -> Tuple[Tensor, ...]:
- r"""
- dsplit(split_size_or_sections) -> List of Tensors
-
- See :func:`torch.dsplit`
- """
- ...
- @overload
- def dsplit(self, indices: _size) -> Tuple[Tensor, ...]:
- r"""
- dsplit(split_size_or_sections) -> List of Tensors
-
- See :func:`torch.dsplit`
- """
- ...
- @overload
- def dsplit(self, *indices: _int) -> Tuple[Tensor, ...]:
- r"""
- dsplit(split_size_or_sections) -> List of Tensors
-
- See :func:`torch.dsplit`
- """
- ...
- def element_size(self) -> _int:
- r"""
- element_size() -> int
-
- Returns the size in bytes of an individual element.
-
- Example::
-
- >>> torch.tensor([]).element_size()
- 4
- >>> torch.tensor([], dtype=torch.uint8).element_size()
- 1
- """
- ...
- @overload
- def eq(self, other: Tensor) -> Tensor:
- r"""
- eq(other) -> Tensor
-
- See :func:`torch.eq`
- """
- ...
- @overload
- def eq(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- eq(other) -> Tensor
-
- See :func:`torch.eq`
- """
- ...
- @overload
- def eq_(self, other: Tensor) -> Tensor:
- r"""
- eq_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.eq`
- """
- ...
- @overload
- def eq_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- eq_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.eq`
- """
- ...
- def equal(self, other: Tensor) -> _bool:
- r"""
- equal(other) -> bool
-
- See :func:`torch.equal`
- """
- ...
- def erf(self) -> Tensor:
- r"""
- erf() -> Tensor
-
- See :func:`torch.erf`
- """
- ...
- def erf_(self) -> Tensor:
- r"""
- erf_() -> Tensor
-
- In-place version of :meth:`~Tensor.erf`
- """
- ...
- def erfc(self) -> Tensor:
- r"""
- erfc() -> Tensor
-
- See :func:`torch.erfc`
- """
- ...
- def erfc_(self) -> Tensor:
- r"""
- erfc_() -> Tensor
-
- In-place version of :meth:`~Tensor.erfc`
- """
- ...
- def erfinv(self) -> Tensor:
- r"""
- erfinv() -> Tensor
-
- See :func:`torch.erfinv`
- """
- ...
- def erfinv_(self) -> Tensor:
- r"""
- erfinv_() -> Tensor
-
- In-place version of :meth:`~Tensor.erfinv`
- """
- ...
- def exp(self) -> Tensor:
- r"""
- exp() -> Tensor
-
- See :func:`torch.exp`
- """
- ...
- def exp2(self) -> Tensor:
- r"""
- exp2() -> Tensor
-
- See :func:`torch.exp2`
- """
- ...
- def exp2_(self) -> Tensor:
- r"""
- exp2_() -> Tensor
-
- In-place version of :meth:`~Tensor.exp2`
- """
- ...
- def exp_(self) -> Tensor:
- r"""
- exp_() -> Tensor
-
- In-place version of :meth:`~Tensor.exp`
- """
- ...
- @overload
- def expand(self, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False) -> Tensor:
- r"""
- expand(*sizes) -> Tensor
-
- Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
- to a larger size.
-
- Passing -1 as the size for a dimension means not changing the size of
- that dimension.
-
- Tensor can be also expanded to a larger number of dimensions, and the
- new ones will be appended at the front. For the new dimensions, the
- size cannot be set to -1.
-
- Expanding a tensor does not allocate new memory, but only creates a
- new view on the existing tensor where a dimension of size one is
- expanded to a larger size by setting the ``stride`` to 0. Any dimension
- of size 1 can be expanded to an arbitrary value without allocating new
- memory.
-
- Args:
- *sizes (torch.Size or int...): the desired expanded size
-
- .. warning::
-
- More than one element of an expanded tensor may refer to a single
- memory location. As a result, in-place operations (especially ones that
- are vectorized) may result in incorrect behavior. If you need to write
- to the tensors, please clone them first.
-
- Example::
-
- >>> x = torch.tensor([[1], [2], [3]])
- >>> x.size()
- torch.Size([3, 1])
- >>> x.expand(3, 4)
- tensor([[ 1, 1, 1, 1],
- [ 2, 2, 2, 2],
- [ 3, 3, 3, 3]])
- >>> x.expand(-1, 4) # -1 means not changing the size of that dimension
- tensor([[ 1, 1, 1, 1],
- [ 2, 2, 2, 2],
- [ 3, 3, 3, 3]])
- """
- ...
- @overload
- def expand(self, *size: _int, implicit: _bool = False) -> Tensor:
- r"""
- expand(*sizes) -> Tensor
-
- Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
- to a larger size.
-
- Passing -1 as the size for a dimension means not changing the size of
- that dimension.
-
- Tensor can be also expanded to a larger number of dimensions, and the
- new ones will be appended at the front. For the new dimensions, the
- size cannot be set to -1.
-
- Expanding a tensor does not allocate new memory, but only creates a
- new view on the existing tensor where a dimension of size one is
- expanded to a larger size by setting the ``stride`` to 0. Any dimension
- of size 1 can be expanded to an arbitrary value without allocating new
- memory.
-
- Args:
- *sizes (torch.Size or int...): the desired expanded size
-
- .. warning::
-
- More than one element of an expanded tensor may refer to a single
- memory location. As a result, in-place operations (especially ones that
- are vectorized) may result in incorrect behavior. If you need to write
- to the tensors, please clone them first.
-
- Example::
-
- >>> x = torch.tensor([[1], [2], [3]])
- >>> x.size()
- torch.Size([3, 1])
- >>> x.expand(3, 4)
- tensor([[ 1, 1, 1, 1],
- [ 2, 2, 2, 2],
- [ 3, 3, 3, 3]])
- >>> x.expand(-1, 4) # -1 means not changing the size of that dimension
- tensor([[ 1, 1, 1, 1],
- [ 2, 2, 2, 2],
- [ 3, 3, 3, 3]])
- """
- ...
- def expand_as(self, other: Tensor) -> Tensor:
- r"""
- expand_as(other) -> Tensor
-
- Expand this tensor to the same size as :attr:`other`.
- ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
-
- Please see :meth:`~Tensor.expand` for more information about ``expand``.
-
- Args:
- other (:class:`torch.Tensor`): The result tensor has the same size
- as :attr:`other`.
- """
- ...
- def expm1(self) -> Tensor:
- r"""
- expm1() -> Tensor
-
- See :func:`torch.expm1`
- """
- ...
- def expm1_(self) -> Tensor:
- r"""
- expm1_() -> Tensor
-
- In-place version of :meth:`~Tensor.expm1`
- """
- ...
- def exponential_(self, lambd: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- exponential_(lambd=1, *, generator=None) -> Tensor
-
- Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):
-
- .. math::
-
- f(x) = \lambda e^{-\lambda x}, x > 0
-
- .. note::
- In probability theory, exponential distribution is supported on interval [0, :math:`\inf`) (i.e., :math:`x >= 0`)
- implying that zero can be sampled from the exponential distribution.
- However, :func:`torch.Tensor.exponential_` does not sample zero,
- which means that its actual support is the interval (0, :math:`\inf`).
-
- Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\inf`) and can sample zero.
- """
- ...
- @overload
- def fill_(self, value: Tensor) -> Tensor:
- r"""
- fill_(value) -> Tensor
-
- Fills :attr:`self` tensor with the specified value.
- """
- ...
- @overload
- def fill_(self, value: Union[Number, _complex]) -> Tensor:
- r"""
- fill_(value) -> Tensor
-
- Fills :attr:`self` tensor with the specified value.
- """
- ...
- def fill_diagonal_(self, fill_value: Union[Number, _complex], wrap: _bool = False) -> Tensor:
- r"""
- fill_diagonal_(fill_value, wrap=False) -> Tensor
-
- Fill the main diagonal of a tensor that has at least 2-dimensions.
- When dims>2, all dimensions of input must be of equal length.
- This function modifies the input tensor in-place, and returns the input tensor.
-
- Arguments:
- fill_value (Scalar): the fill value
- wrap (bool): the diagonal 'wrapped' after N columns for tall matrices.
-
- Example::
-
- >>> a = torch.zeros(3, 3)
- >>> a.fill_diagonal_(5)
- tensor([[5., 0., 0.],
- [0., 5., 0.],
- [0., 0., 5.]])
- >>> b = torch.zeros(7, 3)
- >>> b.fill_diagonal_(5)
- tensor([[5., 0., 0.],
- [0., 5., 0.],
- [0., 0., 5.],
- [0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]])
- >>> c = torch.zeros(7, 3)
- >>> c.fill_diagonal_(5, wrap=True)
- tensor([[5., 0., 0.],
- [0., 5., 0.],
- [0., 0., 5.],
- [0., 0., 0.],
- [5., 0., 0.],
- [0., 5., 0.],
- [0., 0., 5.]])
- """
- ...
- def fix(self) -> Tensor:
- r"""
- fix() -> Tensor
-
- See :func:`torch.fix`.
- """
- ...
- def fix_(self) -> Tensor:
- r"""
- fix_() -> Tensor
-
- In-place version of :meth:`~Tensor.fix`
- """
- ...
- @overload
- def flatten(self, start_dim: _int = 0, end_dim: _int = -1) -> Tensor:
- r"""
- flatten(start_dim=0, end_dim=-1) -> Tensor
-
- See :func:`torch.flatten`
- """
- ...
- @overload
- def flatten(self, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- flatten(start_dim=0, end_dim=-1) -> Tensor
-
- See :func:`torch.flatten`
- """
- ...
- @overload
- def flatten(self, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- flatten(start_dim=0, end_dim=-1) -> Tensor
-
- See :func:`torch.flatten`
- """
- ...
- @overload
- def flatten(self, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- flatten(start_dim=0, end_dim=-1) -> Tensor
-
- See :func:`torch.flatten`
- """
- ...
- @overload
- def flip(self, dims: _size) -> Tensor:
- r"""
- flip(dims) -> Tensor
-
- See :func:`torch.flip`
- """
- ...
- @overload
- def flip(self, *dims: _int) -> Tensor:
- r"""
- flip(dims) -> Tensor
-
- See :func:`torch.flip`
- """
- ...
- def fliplr(self) -> Tensor:
- r"""
- fliplr() -> Tensor
-
- See :func:`torch.fliplr`
- """
- ...
- def flipud(self) -> Tensor:
- r"""
- flipud() -> Tensor
-
- See :func:`torch.flipud`
- """
- ...
- def float(self) -> Tensor:
- r"""
- float(memory_format=torch.preserve_format) -> Tensor
-
- ``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- @overload
- def float_power(self, exponent: Tensor) -> Tensor:
- r"""
- float_power(exponent) -> Tensor
-
- See :func:`torch.float_power`
- """
- ...
- @overload
- def float_power(self, exponent: Union[Number, _complex]) -> Tensor:
- r"""
- float_power(exponent) -> Tensor
-
- See :func:`torch.float_power`
- """
- ...
- @overload
- def float_power_(self, exponent: Tensor) -> Tensor:
- r"""
- float_power_(exponent) -> Tensor
-
- In-place version of :meth:`~Tensor.float_power`
- """
- ...
- @overload
- def float_power_(self, exponent: Union[Number, _complex]) -> Tensor:
- r"""
- float_power_(exponent) -> Tensor
-
- In-place version of :meth:`~Tensor.float_power`
- """
- ...
- def floor(self) -> Tensor:
- r"""
- floor() -> Tensor
-
- See :func:`torch.floor`
- """
- ...
- def floor_(self) -> Tensor:
- r"""
- floor_() -> Tensor
-
- In-place version of :meth:`~Tensor.floor`
- """
- ...
- def floor_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- floor_divide(value) -> Tensor
-
- See :func:`torch.floor_divide`
- """
- ...
- def floor_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor:
- r"""
- floor_divide_(value) -> Tensor
-
- In-place version of :meth:`~Tensor.floor_divide`
- """
- ...
- def fmax(self, other: Tensor) -> Tensor:
- r"""
- fmax(other) -> Tensor
-
- See :func:`torch.fmax`
- """
- ...
- def fmin(self, other: Tensor) -> Tensor:
- r"""
- fmin(other) -> Tensor
-
- See :func:`torch.fmin`
- """
- ...
- @overload
- def fmod(self, other: Tensor) -> Tensor:
- r"""
- fmod(divisor) -> Tensor
-
- See :func:`torch.fmod`
- """
- ...
- @overload
- def fmod(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- fmod(divisor) -> Tensor
-
- See :func:`torch.fmod`
- """
- ...
- @overload
- def fmod_(self, other: Tensor) -> Tensor:
- r"""
- fmod_(divisor) -> Tensor
-
- In-place version of :meth:`~Tensor.fmod`
- """
- ...
- @overload
- def fmod_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- fmod_(divisor) -> Tensor
-
- In-place version of :meth:`~Tensor.fmod`
- """
- ...
- def frac(self) -> Tensor:
- r"""
- frac() -> Tensor
-
- See :func:`torch.frac`
- """
- ...
- def frac_(self) -> Tensor:
- r"""
- frac_() -> Tensor
-
- In-place version of :meth:`~Tensor.frac`
- """
- ...
- def frexp(self) -> torch.return_types.frexp:
- r"""
- frexp(input) -> (Tensor mantissa, Tensor exponent)
-
- See :func:`torch.frexp`
- """
- ...
- @overload
- def gather(self, dim: _int, index: Tensor, *, sparse_grad: _bool = False) -> Tensor:
- r"""
- gather(dim, index) -> Tensor
-
- See :func:`torch.gather`
- """
- ...
- @overload
- def gather(self, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False) -> Tensor:
- r"""
- gather(dim, index) -> Tensor
-
- See :func:`torch.gather`
- """
- ...
- def gcd(self, other: Tensor) -> Tensor:
- r"""
- gcd(other) -> Tensor
-
- See :func:`torch.gcd`
- """
- ...
- def gcd_(self, other: Tensor) -> Tensor:
- r"""
- gcd_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.gcd`
- """
- ...
- @overload
- def ge(self, other: Tensor) -> Tensor:
- r"""
- ge(other) -> Tensor
-
- See :func:`torch.ge`.
- """
- ...
- @overload
- def ge(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- ge(other) -> Tensor
-
- See :func:`torch.ge`.
- """
- ...
- @overload
- def ge_(self, other: Tensor) -> Tensor:
- r"""
- ge_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.ge`.
- """
- ...
- @overload
- def ge_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- ge_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.ge`.
- """
- ...
- def geometric_(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- geometric_(p, *, generator=None) -> Tensor
-
- Fills :attr:`self` tensor with elements drawn from the geometric distribution:
-
- .. math::
-
- P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...
-
- .. note::
- :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`, whereas
- :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success
- hence draws samples in :math:`\{0, 1, \ldots\}`.
- """
- ...
- def geqrf(self) -> torch.return_types.geqrf:
- r"""
- geqrf() -> (Tensor, Tensor)
-
- See :func:`torch.geqrf`
- """
- ...
- def ger(self, vec2: Tensor) -> Tensor:
- r"""
- ger(vec2) -> Tensor
-
- See :func:`torch.ger`
- """
- ...
- def get_device(self) -> _int:
- r"""
- get_device() -> Device ordinal (Integer)
-
- For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
- For CPU tensors, this function returns `-1`.
-
- Example::
-
- >>> x = torch.randn(3, 4, 5, device='cuda:0')
- >>> x.get_device()
- 0
- >>> x.cpu().get_device()
- -1
- """
- ...
- @overload
- def greater(self, other: Tensor) -> Tensor:
- r"""
- greater(other) -> Tensor
-
- See :func:`torch.greater`.
- """
- ...
- @overload
- def greater(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- greater(other) -> Tensor
-
- See :func:`torch.greater`.
- """
- ...
- @overload
- def greater_(self, other: Tensor) -> Tensor:
- r"""
- greater_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.greater`.
- """
- ...
- @overload
- def greater_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- greater_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.greater`.
- """
- ...
- @overload
- def greater_equal(self, other: Tensor) -> Tensor:
- r"""
- greater_equal(other) -> Tensor
-
- See :func:`torch.greater_equal`.
- """
- ...
- @overload
- def greater_equal(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- greater_equal(other) -> Tensor
-
- See :func:`torch.greater_equal`.
- """
- ...
- @overload
- def greater_equal_(self, other: Tensor) -> Tensor:
- r"""
- greater_equal_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.greater_equal`.
- """
- ...
- @overload
- def greater_equal_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- greater_equal_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.greater_equal`.
- """
- ...
- @overload
- def gt(self, other: Tensor) -> Tensor:
- r"""
- gt(other) -> Tensor
-
- See :func:`torch.gt`.
- """
- ...
- @overload
- def gt(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- gt(other) -> Tensor
-
- See :func:`torch.gt`.
- """
- ...
- @overload
- def gt_(self, other: Tensor) -> Tensor:
- r"""
- gt_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.gt`.
- """
- ...
- @overload
- def gt_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- gt_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.gt`.
- """
- ...
- def half(self) -> Tensor:
- r"""
- half(memory_format=torch.preserve_format) -> Tensor
-
- ``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def hardshrink(self, lambd: Union[Number, _complex] = 0.5) -> Tensor:
- r"""
- hardshrink(lambd=0.5) -> Tensor
-
- See :func:`torch.nn.functional.hardshrink`
- """
- ...
- def has_names(self) -> _bool:
- r"""
- Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
- """
- ...
- def heaviside(self, values: Tensor) -> Tensor:
- r"""
- heaviside(values) -> Tensor
-
- See :func:`torch.heaviside`
- """
- ...
- def heaviside_(self, values: Tensor) -> Tensor:
- r"""
- heaviside_(values) -> Tensor
-
- In-place version of :meth:`~Tensor.heaviside`
- """
- ...
- def histc(self, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0) -> Tensor:
- r"""
- histc(bins=100, min=0, max=0) -> Tensor
-
- See :func:`torch.histc`
- """
- ...
- @overload
- def histogram(self, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram:
- r"""
- histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
-
- See :func:`torch.histogram`
- """
- ...
- @overload
- def histogram(self, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram:
- r"""
- histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
-
- See :func:`torch.histogram`
- """
- ...
- @overload
- def hsplit(self, sections: _int) -> Tuple[Tensor, ...]:
- r"""
- hsplit(split_size_or_sections) -> List of Tensors
-
- See :func:`torch.hsplit`
- """
- ...
- @overload
- def hsplit(self, indices: _size) -> Tuple[Tensor, ...]:
- r"""
- hsplit(split_size_or_sections) -> List of Tensors
-
- See :func:`torch.hsplit`
- """
- ...
- @overload
- def hsplit(self, *indices: _int) -> Tuple[Tensor, ...]:
- r"""
- hsplit(split_size_or_sections) -> List of Tensors
-
- See :func:`torch.hsplit`
- """
- ...
- def hypot(self, other: Tensor) -> Tensor:
- r"""
- hypot(other) -> Tensor
-
- See :func:`torch.hypot`
- """
- ...
- def hypot_(self, other: Tensor) -> Tensor:
- r"""
- hypot_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.hypot`
- """
- ...
- def i0(self) -> Tensor:
- r"""
- i0() -> Tensor
-
- See :func:`torch.i0`
- """
- ...
- def i0_(self) -> Tensor:
- r"""
- i0_() -> Tensor
-
- In-place version of :meth:`~Tensor.i0`
- """
- ...
- def igamma(self, other: Tensor) -> Tensor:
- r"""
- igamma(other) -> Tensor
-
- See :func:`torch.igamma`
- """
- ...
- def igamma_(self, other: Tensor) -> Tensor:
- r"""
- igamma_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.igamma`
- """
- ...
- def igammac(self, other: Tensor) -> Tensor:
- r"""
- igammac(other) -> Tensor
- See :func:`torch.igammac`
- """
- ...
- def igammac_(self, other: Tensor) -> Tensor:
- r"""
- igammac_(other) -> Tensor
- In-place version of :meth:`~Tensor.igammac`
- """
- ...
- @overload
- def index_add(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- index_add(dim, index, source, *, alpha=1) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_add_`.
- """
- ...
- @overload
- def index_add(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- index_add(dim, index, source, *, alpha=1) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_add_`.
- """
- ...
- def index_add_(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- index_add_(dim, index, source, *, alpha=1) -> Tensor
-
- Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
- tensor by adding to the indices in the order given in :attr:`index`. For example,
- if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
- ``source`` is subtracted from the ``j``\ th row of :attr:`self`.
-
- The :attr:`dim`\ th dimension of ``source`` must have the same size as the
- length of :attr:`index` (which must be a vector), and all other dimensions must
- match :attr:`self`, or an error will be raised.
-
- For a 3-D tensor the output is given as::
-
- self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0
- self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1
- self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2
-
- Note:
- This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
-
- Args:
- dim (int): dimension along which to index
- index (Tensor): indices of ``source`` to select from,
- should have dtype either `torch.int64` or `torch.int32`
- source (Tensor): the tensor containing values to add
-
- Keyword args:
- alpha (Number): the scalar multiplier for ``source``
-
- Example::
-
- >>> x = torch.ones(5, 3)
- >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 4, 2])
- >>> x.index_add_(0, index, t)
- tensor([[ 2., 3., 4.],
- [ 1., 1., 1.],
- [ 8., 9., 10.],
- [ 1., 1., 1.],
- [ 5., 6., 7.]])
- >>> x.index_add_(0, index, t, alpha=-1)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.],
- [ 1., 1., 1.],
- [ 1., 1., 1.],
- [ 1., 1., 1.]])
- """
- ...
- @overload
- def index_copy(self, dim: _int, index: Tensor, source: Tensor) -> Tensor:
- r"""
- index_copy(dim, index, tensor2) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_copy_`.
- """
- ...
- @overload
- def index_copy(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
- r"""
- index_copy(dim, index, tensor2) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_copy_`.
- """
- ...
- @overload
- def index_copy_(self, dim: _int, index: Tensor, source: Tensor) -> Tensor:
- r"""
- index_copy_(dim, index, tensor) -> Tensor
-
- Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
- the indices in the order given in :attr:`index`. For example, if ``dim == 0``
- and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
- ``j``\ th row of :attr:`self`.
-
- The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
- length of :attr:`index` (which must be a vector), and all other dimensions must
- match :attr:`self`, or an error will be raised.
-
- .. note::
- If :attr:`index` contains duplicate entries, multiple elements from
- :attr:`tensor` will be copied to the same index of :attr:`self`. The result
- is nondeterministic since it depends on which copy occurs last.
-
- Args:
- dim (int): dimension along which to index
- index (LongTensor): indices of :attr:`tensor` to select from
- tensor (Tensor): the tensor containing values to copy
-
- Example::
-
- >>> x = torch.zeros(5, 3)
- >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 4, 2])
- >>> x.index_copy_(0, index, t)
- tensor([[ 1., 2., 3.],
- [ 0., 0., 0.],
- [ 7., 8., 9.],
- [ 0., 0., 0.],
- [ 4., 5., 6.]])
- """
- ...
- @overload
- def index_copy_(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
- r"""
- index_copy_(dim, index, tensor) -> Tensor
-
- Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
- the indices in the order given in :attr:`index`. For example, if ``dim == 0``
- and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
- ``j``\ th row of :attr:`self`.
-
- The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
- length of :attr:`index` (which must be a vector), and all other dimensions must
- match :attr:`self`, or an error will be raised.
-
- .. note::
- If :attr:`index` contains duplicate entries, multiple elements from
- :attr:`tensor` will be copied to the same index of :attr:`self`. The result
- is nondeterministic since it depends on which copy occurs last.
-
- Args:
- dim (int): dimension along which to index
- index (LongTensor): indices of :attr:`tensor` to select from
- tensor (Tensor): the tensor containing values to copy
-
- Example::
-
- >>> x = torch.zeros(5, 3)
- >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 4, 2])
- >>> x.index_copy_(0, index, t)
- tensor([[ 1., 2., 3.],
- [ 0., 0., 0.],
- [ 7., 8., 9.],
- [ 0., 0., 0.],
- [ 4., 5., 6.]])
- """
- ...
- @overload
- def index_fill(self, dim: _int, index: Tensor, value: Tensor) -> Tensor:
- r"""
- index_fill(dim, index, value) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_fill_`.
- """
- ...
- @overload
- def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor:
- r"""
- index_fill(dim, index, value) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_fill_`.
- """
- ...
- @overload
- def index_fill(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- index_fill(dim, index, value) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_fill_`.
- """
- ...
- @overload
- def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- index_fill(dim, index, value) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.index_fill_`.
- """
- ...
- @overload
- def index_fill_(self, dim: _int, index: Tensor, value: Tensor) -> Tensor:
- r"""
- index_fill_(dim, index, value) -> Tensor
-
- Fills the elements of the :attr:`self` tensor with value :attr:`value` by
- selecting the indices in the order given in :attr:`index`.
-
- Args:
- dim (int): dimension along which to index
- index (LongTensor): indices of :attr:`self` tensor to fill in
- value (float): the value to fill with
-
- Example::
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 2])
- >>> x.index_fill_(1, index, -1)
- tensor([[-1., 2., -1.],
- [-1., 5., -1.],
- [-1., 8., -1.]])
- """
- ...
- @overload
- def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor:
- r"""
- index_fill_(dim, index, value) -> Tensor
-
- Fills the elements of the :attr:`self` tensor with value :attr:`value` by
- selecting the indices in the order given in :attr:`index`.
-
- Args:
- dim (int): dimension along which to index
- index (LongTensor): indices of :attr:`self` tensor to fill in
- value (float): the value to fill with
-
- Example::
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 2])
- >>> x.index_fill_(1, index, -1)
- tensor([[-1., 2., -1.],
- [-1., 5., -1.],
- [-1., 8., -1.]])
- """
- ...
- @overload
- def index_fill_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- index_fill_(dim, index, value) -> Tensor
-
- Fills the elements of the :attr:`self` tensor with value :attr:`value` by
- selecting the indices in the order given in :attr:`index`.
-
- Args:
- dim (int): dimension along which to index
- index (LongTensor): indices of :attr:`self` tensor to fill in
- value (float): the value to fill with
-
- Example::
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 2])
- >>> x.index_fill_(1, index, -1)
- tensor([[-1., 2., -1.],
- [-1., 5., -1.],
- [-1., 8., -1.]])
- """
- ...
- @overload
- def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- index_fill_(dim, index, value) -> Tensor
-
- Fills the elements of the :attr:`self` tensor with value :attr:`value` by
- selecting the indices in the order given in :attr:`index`.
-
- Args:
- dim (int): dimension along which to index
- index (LongTensor): indices of :attr:`self` tensor to fill in
- value (float): the value to fill with
-
- Example::
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
- >>> index = torch.tensor([0, 2])
- >>> x.index_fill_(1, index, -1)
- tensor([[-1., 2., -1.],
- [-1., 5., -1.],
- [-1., 8., -1.]])
- """
- ...
- def index_put(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor:
- r"""
- index_put(indices, values, accumulate=False) -> Tensor
-
- Out-place version of :meth:`~Tensor.index_put_`.
- """
- ...
- def index_put_(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor:
- r"""
- index_put_(indices, values, accumulate=False) -> Tensor
-
- Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
- the indices specified in :attr:`indices` (which is a tuple of Tensors). The
- expression ``tensor.index_put_(indices, values)`` is equivalent to
- ``tensor[indices] = values``. Returns :attr:`self`.
-
- If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
- :attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
- contain duplicate elements.
-
- Args:
- indices (tuple of LongTensor): tensors used to index into `self`.
- values (Tensor): tensor of same dtype as `self`.
- accumulate (bool): whether to accumulate into self
- """
- ...
- def index_reduce(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ...
- def index_reduce_(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor:
- r"""
- index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor
-
- Accumulate the elements of ``source`` into the :attr:`self`
- tensor by accumulating to the indices in the order given in :attr:`index`
- using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
- ``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
- row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
- :obj:`include_self=True`, the values in the :attr:`self` tensor are included
- in the reduction, otherwise, rows in the :attr:`self` tensor that are accumulated
- to are treated as if they were filled with the reduction identities.
-
- The :attr:`dim`\ th dimension of ``source`` must have the same size as the
- length of :attr:`index` (which must be a vector), and all other dimensions must
- match :attr:`self`, or an error will be raised.
-
- For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
- output is given as::
-
- self[index[i], :, :] *= src[i, :, :] # if dim == 0
- self[:, index[i], :] *= src[:, i, :] # if dim == 1
- self[:, :, index[i]] *= src[:, :, i] # if dim == 2
-
- Note:
- This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
-
- .. note::
-
- This function only supports floating point tensors.
-
- .. warning::
-
- This function is in beta and may change in the near future.
-
- Args:
- dim (int): dimension along which to index
- index (Tensor): indices of ``source`` to select from,
- should have dtype either `torch.int64` or `torch.int32`
- source (FloatTensor): the tensor containing values to accumulate
- reduce (str): the reduction operation to apply
- (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
-
- Keyword args:
- include_self (bool): whether the elements from the ``self`` tensor are
- included in the reduction
-
- Example::
-
- >>> x = torch.empty(5, 3).fill_(2)
- >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
- >>> index = torch.tensor([0, 4, 2, 0])
- >>> x.index_reduce_(0, index, t, 'prod')
- tensor([[20., 44., 72.],
- [ 2., 2., 2.],
- [14., 16., 18.],
- [ 2., 2., 2.],
- [ 8., 10., 12.]])
- >>> x = torch.empty(5, 3).fill_(2)
- >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
- tensor([[10., 22., 36.],
- [ 2., 2., 2.],
- [ 7., 8., 9.],
- [ 2., 2., 2.],
- [ 4., 5., 6.]])
- """
- ...
- @overload
- def index_select(self, dim: _int, index: Tensor) -> Tensor:
- r"""
- index_select(dim, index) -> Tensor
-
- See :func:`torch.index_select`
- """
- ...
- @overload
- def index_select(self, dim: Union[str, ellipsis, None], index: Tensor) -> Tensor:
- r"""
- index_select(dim, index) -> Tensor
-
- See :func:`torch.index_select`
- """
- ...
- def indices(self) -> Tensor:
- r"""
- indices() -> Tensor
-
- Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
-
- .. warning::
- Throws an error if :attr:`self` is not a sparse COO tensor.
-
- See also :meth:`Tensor.values`.
-
- .. note::
- This method can only be called on a coalesced sparse tensor. See
- :meth:`Tensor.coalesce` for details.
- """
- ...
- def inner(self, other: Tensor) -> Tensor:
- r"""
- inner(other) -> Tensor
-
- See :func:`torch.inner`.
- """
- ...
- def int(self) -> Tensor:
- r"""
- int(memory_format=torch.preserve_format) -> Tensor
-
- ``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def int_repr(self) -> Tensor:
- r"""
- int_repr() -> Tensor
-
- Given a quantized Tensor,
- ``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the
- underlying uint8_t values of the given Tensor.
- """
- ...
- def inverse(self) -> Tensor:
- r"""
- inverse() -> Tensor
-
- See :func:`torch.inverse`
- """
- ...
- def is_coalesced(self) -> _bool:
- r"""
- is_coalesced() -> bool
-
- Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
- <sparse-coo-docs>` that is coalesced, ``False`` otherwise.
-
- .. warning::
- Throws an error if :attr:`self` is not a sparse COO tensor.
-
- See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
- """
- ...
- def is_complex(self) -> _bool:
- r"""
- is_complex() -> bool
-
- Returns True if the data type of :attr:`self` is a complex data type.
- """
- ...
- def is_conj(self) -> _bool:
- r"""
- is_conj() -> bool
-
- Returns True if the conjugate bit of :attr:`self` is set to true.
- """
- ...
- def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool:
- r"""
- is_contiguous(memory_format=torch.contiguous_format) -> bool
-
- Returns True if :attr:`self` tensor is contiguous in memory in the order specified
- by memory format.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
- order. Default: ``torch.contiguous_format``.
- """
- ...
- is_cpu: _bool
- r"""Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise."""
- is_cuda: _bool
- r"""Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise."""
- def is_distributed(self) -> _bool: ...
- def is_floating_point(self) -> _bool:
- r"""
- is_floating_point() -> bool
-
- Returns True if the data type of :attr:`self` is a floating point data type.
- """
- ...
- def is_inference(self) -> _bool:
- r"""
- is_inference() -> bool
-
- See :func:`torch.is_inference`
- """
- ...
- is_ipu: _bool
- r"""Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise."""
- is_leaf: _bool
- r"""All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
-
- For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
- created by the user. This means that they are not the result of an operation and so
- :attr:`grad_fn` is None.
-
- Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
- To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
-
- Example::
-
- >>> a = torch.rand(10, requires_grad=True)
- >>> a.is_leaf
- True
- >>> b = torch.rand(10, requires_grad=True).cuda()
- >>> b.is_leaf
- False
- # b was created by the operation that cast a cpu Tensor into a cuda Tensor
- >>> c = torch.rand(10, requires_grad=True) + 2
- >>> c.is_leaf
- False
- # c was created by the addition operation
- >>> d = torch.rand(10).cuda()
- >>> d.is_leaf
- True
- # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
- >>> e = torch.rand(10).cuda().requires_grad_()
- >>> e.is_leaf
- True
- # e requires gradients and has no operations creating it
- >>> f = torch.rand(10, requires_grad=True, device="cuda")
- >>> f.is_leaf
- True
- # f requires grad, has no operation creating it"""
- is_maia: _bool
- is_meta: _bool
- r"""Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
- are like normal tensors, but they carry no data."""
- is_mkldnn: _bool
- is_mps: _bool
- r"""Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise."""
- is_mtia: _bool
- def is_neg(self) -> _bool:
- r"""
- is_neg() -> bool
-
- Returns True if the negative bit of :attr:`self` is set to true.
- """
- ...
- is_nested: _bool
- def is_nonzero(self) -> _bool: ...
- def is_pinned(self, device: Optional[Optional[DeviceLikeType]] = None) -> _bool:
- r"""
- is_pinned(device=None) -> bool
-
- Returns true if this tensor resides in pinned memory.
- """
- ...
- is_quantized: _bool
- r"""Is ``True`` if the Tensor is quantized, ``False`` otherwise."""
- def is_same_size(self, other: Tensor) -> _bool: ...
- def is_set_to(self, tensor: Tensor) -> _bool:
- r"""
- is_set_to(tensor) -> bool
-
- Returns True if both tensors are pointing to the exact same memory (same
- storage, offset, size and stride).
- """
- ...
- def is_signed(self) -> _bool:
- r"""
- is_signed() -> bool
-
- Returns True if the data type of :attr:`self` is a signed data type.
- """
- ...
- is_sparse: _bool
- r"""Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise."""
- is_sparse_csr: _bool
- r"""Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise."""
- is_vulkan: _bool
- def isclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor:
- r"""
- isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
-
- See :func:`torch.isclose`
- """
- ...
- def isfinite(self) -> Tensor:
- r"""
- isfinite() -> Tensor
-
- See :func:`torch.isfinite`
- """
- ...
- def isinf(self) -> Tensor:
- r"""
- isinf() -> Tensor
-
- See :func:`torch.isinf`
- """
- ...
- def isnan(self) -> Tensor:
- r"""
- isnan() -> Tensor
-
- See :func:`torch.isnan`
- """
- ...
- def isneginf(self) -> Tensor:
- r"""
- isneginf() -> Tensor
-
- See :func:`torch.isneginf`
- """
- ...
- def isposinf(self) -> Tensor:
- r"""
- isposinf() -> Tensor
-
- See :func:`torch.isposinf`
- """
- ...
- def isreal(self) -> Tensor:
- r"""
- isreal() -> Tensor
-
- See :func:`torch.isreal`
- """
- ...
- def istft(self, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor:
- r"""
- istft(n_fft, hop_length=None, win_length=None, window=None, center=True,
- normalized=False, onesided=None, length=None, return_complex=False) -> Tensor
-
- See :func:`torch.istft`
- """
- ...
- def item(self) -> Number:
- r"""
- item() -> number
-
- Returns the value of this tensor as a standard Python number. This only works
- for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
-
- This operation is not differentiable.
-
- Example::
-
- >>> x = torch.tensor([1.0])
- >>> x.item()
- 1.0
- """
- ...
- def kron(self, other: Tensor) -> Tensor:
- r"""
- kron(other) -> Tensor
-
- See :func:`torch.kron`
- """
- ...
- @overload
- def kthvalue(self, k: _int, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.kthvalue:
- r"""
- kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.kthvalue`
- """
- ...
- @overload
- def kthvalue(self, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.kthvalue:
- r"""
- kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.kthvalue`
- """
- ...
- def lcm(self, other: Tensor) -> Tensor:
- r"""
- lcm(other) -> Tensor
-
- See :func:`torch.lcm`
- """
- ...
- def lcm_(self, other: Tensor) -> Tensor:
- r"""
- lcm_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.lcm`
- """
- ...
- def ldexp(self, other: Tensor) -> Tensor:
- r"""
- ldexp(other) -> Tensor
-
- See :func:`torch.ldexp`
- """
- ...
- def ldexp_(self, other: Tensor) -> Tensor:
- r"""
- ldexp_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.ldexp`
- """
- ...
- @overload
- def le(self, other: Tensor) -> Tensor:
- r"""
- le(other) -> Tensor
-
- See :func:`torch.le`.
- """
- ...
- @overload
- def le(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- le(other) -> Tensor
-
- See :func:`torch.le`.
- """
- ...
- @overload
- def le_(self, other: Tensor) -> Tensor:
- r"""
- le_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.le`.
- """
- ...
- @overload
- def le_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- le_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.le`.
- """
- ...
- @overload
- def lerp(self, end: Tensor, weight: Tensor) -> Tensor:
- r"""
- lerp(end, weight) -> Tensor
-
- See :func:`torch.lerp`
- """
- ...
- @overload
- def lerp(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor:
- r"""
- lerp(end, weight) -> Tensor
-
- See :func:`torch.lerp`
- """
- ...
- @overload
- def lerp_(self, end: Tensor, weight: Tensor) -> Tensor:
- r"""
- lerp_(end, weight) -> Tensor
-
- In-place version of :meth:`~Tensor.lerp`
- """
- ...
- @overload
- def lerp_(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor:
- r"""
- lerp_(end, weight) -> Tensor
-
- In-place version of :meth:`~Tensor.lerp`
- """
- ...
- @overload
- def less(self, other: Tensor) -> Tensor:
- r"""
- less(other) -> Tensor
-
- See :func:`torch.less`.
- """
- ...
- @overload
- def less(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- less(other) -> Tensor
-
- See :func:`torch.less`.
- """
- ...
- @overload
- def less_(self, other: Tensor) -> Tensor:
- r"""
- less_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.less`.
- """
- ...
- @overload
- def less_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- less_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.less`.
- """
- ...
- @overload
- def less_equal(self, other: Tensor) -> Tensor:
- r"""
- less_equal(other) -> Tensor
-
- See :func:`torch.less_equal`.
- """
- ...
- @overload
- def less_equal(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- less_equal(other) -> Tensor
-
- See :func:`torch.less_equal`.
- """
- ...
- @overload
- def less_equal_(self, other: Tensor) -> Tensor:
- r"""
- less_equal_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.less_equal`.
- """
- ...
- @overload
- def less_equal_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- less_equal_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.less_equal`.
- """
- ...
- def lgamma(self) -> Tensor:
- r"""
- lgamma() -> Tensor
-
- See :func:`torch.lgamma`
- """
- ...
- def lgamma_(self) -> Tensor:
- r"""
- lgamma_() -> Tensor
-
- In-place version of :meth:`~Tensor.lgamma`
- """
- ...
- def log(self) -> Tensor:
- r"""
- log() -> Tensor
-
- See :func:`torch.log`
- """
- ...
- def log10(self) -> Tensor:
- r"""
- log10() -> Tensor
-
- See :func:`torch.log10`
- """
- ...
- def log10_(self) -> Tensor:
- r"""
- log10_() -> Tensor
-
- In-place version of :meth:`~Tensor.log10`
- """
- ...
- def log1p(self) -> Tensor:
- r"""
- log1p() -> Tensor
-
- See :func:`torch.log1p`
- """
- ...
- def log1p_(self) -> Tensor:
- r"""
- log1p_() -> Tensor
-
- In-place version of :meth:`~Tensor.log1p`
- """
- ...
- def log2(self) -> Tensor:
- r"""
- log2() -> Tensor
-
- See :func:`torch.log2`
- """
- ...
- def log2_(self) -> Tensor:
- r"""
- log2_() -> Tensor
-
- In-place version of :meth:`~Tensor.log2`
- """
- ...
- def log_(self) -> Tensor:
- r"""
- log_() -> Tensor
-
- In-place version of :meth:`~Tensor.log`
- """
- ...
- def log_normal_(self, mean: _float = 1, std: _float = 2, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- log_normal_(mean=1, std=2, *, generator=None)
-
- Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
- parameterized by the given mean :math:`\mu` and standard deviation
- :math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
- standard deviation of the underlying normal distribution, and not of the
- returned distribution:
-
- .. math::
-
- f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
- """
- ...
- @overload
- def log_softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor: ...
- @overload
- def log_softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
- def logaddexp(self, other: Tensor) -> Tensor:
- r"""
- logaddexp(other) -> Tensor
-
- See :func:`torch.logaddexp`
- """
- ...
- def logaddexp2(self, other: Tensor) -> Tensor:
- r"""
- logaddexp2(other) -> Tensor
-
- See :func:`torch.logaddexp2`
- """
- ...
- @overload
- def logcumsumexp(self, dim: _int) -> Tensor:
- r"""
- logcumsumexp(dim) -> Tensor
-
- See :func:`torch.logcumsumexp`
- """
- ...
- @overload
- def logcumsumexp(self, dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- logcumsumexp(dim) -> Tensor
-
- See :func:`torch.logcumsumexp`
- """
- ...
- def logdet(self) -> Tensor:
- r"""
- logdet() -> Tensor
-
- See :func:`torch.logdet`
- """
- ...
- def logical_and(self, other: Tensor) -> Tensor:
- r"""
- logical_and() -> Tensor
-
- See :func:`torch.logical_and`
- """
- ...
- def logical_and_(self, other: Tensor) -> Tensor:
- r"""
- logical_and_() -> Tensor
-
- In-place version of :meth:`~Tensor.logical_and`
- """
- ...
- def logical_not(self) -> Tensor:
- r"""
- logical_not() -> Tensor
-
- See :func:`torch.logical_not`
- """
- ...
- def logical_not_(self) -> Tensor:
- r"""
- logical_not_() -> Tensor
-
- In-place version of :meth:`~Tensor.logical_not`
- """
- ...
- def logical_or(self, other: Tensor) -> Tensor:
- r"""
- logical_or() -> Tensor
-
- See :func:`torch.logical_or`
- """
- ...
- def logical_or_(self, other: Tensor) -> Tensor:
- r"""
- logical_or_() -> Tensor
-
- In-place version of :meth:`~Tensor.logical_or`
- """
- ...
- def logical_xor(self, other: Tensor) -> Tensor:
- r"""
- logical_xor() -> Tensor
-
- See :func:`torch.logical_xor`
- """
- ...
- def logical_xor_(self, other: Tensor) -> Tensor:
- r"""
- logical_xor_() -> Tensor
-
- In-place version of :meth:`~Tensor.logical_xor`
- """
- ...
- def logit(self, eps: Optional[_float] = None) -> Tensor:
- r"""
- logit(eps=None) -> Tensor
-
- See :func:`torch.logit`
- """
- ...
- def logit_(self, eps: Optional[_float] = None) -> Tensor:
- r"""
- logit_(eps=None) -> Tensor
-
- In-place version of :meth:`~Tensor.logit`
- """
- ...
- @overload
- def logsumexp(self, dim: Union[_int, _size], keepdim: _bool = False) -> Tensor:
- r"""
- logsumexp(dim, keepdim=False) -> Tensor
-
- See :func:`torch.logsumexp`
- """
- ...
- @overload
- def logsumexp(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False) -> Tensor:
- r"""
- logsumexp(dim, keepdim=False) -> Tensor
-
- See :func:`torch.logsumexp`
- """
- ...
- def long(self) -> Tensor:
- r"""
- long(memory_format=torch.preserve_format) -> Tensor
-
- ``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- @overload
- def lt(self, other: Tensor) -> Tensor:
- r"""
- lt(other) -> Tensor
-
- See :func:`torch.lt`.
- """
- ...
- @overload
- def lt(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- lt(other) -> Tensor
-
- See :func:`torch.lt`.
- """
- ...
- @overload
- def lt_(self, other: Tensor) -> Tensor:
- r"""
- lt_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.lt`.
- """
- ...
- @overload
- def lt_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- lt_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.lt`.
- """
- ...
- def lu_solve(self, LU_data: Tensor, LU_pivots: Tensor) -> Tensor:
- r"""
- lu_solve(LU_data, LU_pivots) -> Tensor
-
- See :func:`torch.lu_solve`
- """
- ...
- def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ...
- def map_(self, tensor: Tensor, callable: Callable) -> Tensor:
- r"""
- map_(tensor, callable)
-
- Applies :attr:`callable` for each element in :attr:`self` tensor and the given
- :attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
- the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- The :attr:`callable` should have the signature::
-
- def callable(a, b) -> number
- """
- ...
- @overload
- def masked_fill(self, mask: Tensor, value: Tensor) -> Tensor:
- r"""
- masked_fill(mask, value) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.masked_fill_`
- """
- ...
- @overload
- def masked_fill(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- masked_fill(mask, value) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.masked_fill_`
- """
- ...
- @overload
- def masked_fill_(self, mask: Tensor, value: Tensor) -> Tensor:
- r"""
- masked_fill_(mask, value)
-
- Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
- True. The shape of :attr:`mask` must be
- :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
- tensor.
-
- Args:
- mask (BoolTensor): the boolean mask
- value (float): the value to fill in with
- """
- ...
- @overload
- def masked_fill_(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- masked_fill_(mask, value)
-
- Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
- True. The shape of :attr:`mask` must be
- :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
- tensor.
-
- Args:
- mask (BoolTensor): the boolean mask
- value (float): the value to fill in with
- """
- ...
- def masked_scatter(self, mask: Tensor, source: Tensor) -> Tensor:
- r"""
- masked_scatter(mask, tensor) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
-
- .. note::
-
- The inputs :attr:`self` and :attr:`mask`
- :ref:`broadcast <broadcasting-semantics>`.
-
- Example:
-
- >>> self = torch.tensor([0, 0, 0, 0, 0])
- >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
- >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
- >>> self.masked_scatter(mask, source)
- tensor([[0, 0, 0, 0, 1],
- [2, 3, 0, 4, 5]])
- """
- ...
- def masked_scatter_(self, mask: Tensor, source: Tensor) -> Tensor:
- r"""
- masked_scatter_(mask, source)
-
- Copies elements from :attr:`source` into :attr:`self` tensor at positions where
- the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
- starting at position 0 of :attr:`source` and continuing in order one-by-one for each
- occurrence of :attr:`mask` being True.
- The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
- with the shape of the underlying tensor. The :attr:`source` should have at least
- as many elements as the number of ones in :attr:`mask`.
-
- Args:
- mask (BoolTensor): the boolean mask
- source (Tensor): the tensor to copy from
-
- .. note::
-
- The :attr:`mask` operates on the :attr:`self` tensor, not on the given
- :attr:`source` tensor.
-
- Example:
-
- >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
- >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
- >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
- >>> self.masked_scatter_(mask, source)
- tensor([[0, 0, 0, 0, 1],
- [2, 3, 0, 4, 5]])
- """
- ...
- def masked_select(self, mask: Tensor) -> Tensor:
- r"""
- masked_select(mask) -> Tensor
-
- See :func:`torch.masked_select`
- """
- ...
- def matmul(self, other: Tensor) -> Tensor:
- r"""
- matmul(tensor2) -> Tensor
-
- See :func:`torch.matmul`
- """
- ...
- def matrix_exp(self) -> Tensor:
- r"""
- matrix_exp() -> Tensor
-
- See :func:`torch.matrix_exp`
- """
- ...
- def matrix_power(self, n: _int) -> Tensor:
- r"""
- matrix_power(n) -> Tensor
-
- .. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
-
- Alias for :func:`torch.linalg.matrix_power`
- """
- ...
- @overload
- def max(self) -> Tensor:
- r"""
- max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.max`
- """
- ...
- @overload
- def max(self, other: Tensor) -> Tensor:
- r"""
- max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.max`
- """
- ...
- @overload
- def max(self, dim: _int, keepdim: _bool = False) -> torch.return_types.max:
- r"""
- max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.max`
- """
- ...
- @overload
- def max(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.max:
- r"""
- max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.max`
- """
- ...
- def maximum(self, other: Tensor) -> Tensor:
- r"""
- maximum(other) -> Tensor
-
- See :func:`torch.maximum`
- """
- ...
- @overload
- def mean(self, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
-
- See :func:`torch.mean`
- """
- ...
- @overload
- def mean(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
-
- See :func:`torch.mean`
- """
- ...
- @overload
- def mean(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
-
- See :func:`torch.mean`
- """
- ...
- @overload
- def median(self) -> Tensor:
- r"""
- median(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.median`
- """
- ...
- @overload
- def median(self, dim: _int, keepdim: _bool = False) -> torch.return_types.median:
- r"""
- median(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.median`
- """
- ...
- @overload
- def median(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.median:
- r"""
- median(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.median`
- """
- ...
- @overload
- def min(self) -> Tensor:
- r"""
- min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.min`
- """
- ...
- @overload
- def min(self, other: Tensor) -> Tensor:
- r"""
- min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.min`
- """
- ...
- @overload
- def min(self, dim: _int, keepdim: _bool = False) -> torch.return_types.min:
- r"""
- min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.min`
- """
- ...
- @overload
- def min(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.min:
- r"""
- min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
-
- See :func:`torch.min`
- """
- ...
- def minimum(self, other: Tensor) -> Tensor:
- r"""
- minimum(other) -> Tensor
-
- See :func:`torch.minimum`
- """
- ...
- def mm(self, mat2: Tensor) -> Tensor:
- r"""
- mm(mat2) -> Tensor
-
- See :func:`torch.mm`
- """
- ...
- @overload
- def mode(self, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.mode:
- r"""
- mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.mode`
- """
- ...
- @overload
- def mode(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.mode:
- r"""
- mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.mode`
- """
- ...
- @overload
- def moveaxis(self, source: _int, destination: _int) -> Tensor:
- r"""
- moveaxis(source, destination) -> Tensor
-
- See :func:`torch.moveaxis`
- """
- ...
- @overload
- def moveaxis(self, source: _size, destination: _size) -> Tensor:
- r"""
- moveaxis(source, destination) -> Tensor
-
- See :func:`torch.moveaxis`
- """
- ...
- @overload
- def movedim(self, source: _int, destination: _int) -> Tensor:
- r"""
- movedim(source, destination) -> Tensor
-
- See :func:`torch.movedim`
- """
- ...
- @overload
- def movedim(self, source: _size, destination: _size) -> Tensor:
- r"""
- movedim(source, destination) -> Tensor
-
- See :func:`torch.movedim`
- """
- ...
- def msort(self) -> Tensor:
- r"""
- msort() -> Tensor
-
- See :func:`torch.msort`
- """
- ...
- def mul(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mul(value) -> Tensor
-
- See :func:`torch.mul`.
- """
- ...
- def mul_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat]) -> Tensor:
- r"""
- mul_(value) -> Tensor
-
- In-place version of :meth:`~Tensor.mul`.
- """
- ...
- def multinomial(self, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
-
- See :func:`torch.multinomial`
- """
- ...
- @overload
- def multiply(self, other: Tensor) -> Tensor:
- r"""
- multiply(value) -> Tensor
-
- See :func:`torch.multiply`.
- """
- ...
- @overload
- def multiply(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- multiply(value) -> Tensor
-
- See :func:`torch.multiply`.
- """
- ...
- @overload
- def multiply_(self, other: Tensor) -> Tensor:
- r"""
- multiply_(value) -> Tensor
-
- In-place version of :meth:`~Tensor.multiply`.
- """
- ...
- @overload
- def multiply_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- multiply_(value) -> Tensor
-
- In-place version of :meth:`~Tensor.multiply`.
- """
- ...
- def mv(self, vec: Tensor) -> Tensor:
- r"""
- mv(vec) -> Tensor
-
- See :func:`torch.mv`
- """
- ...
- def mvlgamma(self, p: _int) -> Tensor:
- r"""
- mvlgamma(p) -> Tensor
-
- See :func:`torch.mvlgamma`
- """
- ...
- def mvlgamma_(self, p: _int) -> Tensor:
- r"""
- mvlgamma_(p) -> Tensor
-
- In-place version of :meth:`~Tensor.mvlgamma`
- """
- ...
- def nan_to_num(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor:
- r"""
- nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
-
- See :func:`torch.nan_to_num`.
- """
- ...
- def nan_to_num_(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor:
- r"""
- nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
-
- In-place version of :meth:`~Tensor.nan_to_num`.
- """
- ...
- def nanmean(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
-
- See :func:`torch.nanmean`
- """
- ...
- @overload
- def nanmedian(self) -> Tensor:
- r"""
- nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.nanmedian`
- """
- ...
- @overload
- def nanmedian(self, dim: _int, keepdim: _bool = False) -> torch.return_types.nanmedian:
- r"""
- nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.nanmedian`
- """
- ...
- @overload
- def nanmedian(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.nanmedian:
- r"""
- nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
-
- See :func:`torch.nanmedian`
- """
- ...
- @overload
- def nanquantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
- r"""
- nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
-
- See :func:`torch.nanquantile`
- """
- ...
- @overload
- def nanquantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
- r"""
- nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
-
- See :func:`torch.nanquantile`
- """
- ...
- def nansum(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- nansum(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.nansum`
- """
- ...
- @overload
- def narrow(self, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor:
- r"""
- narrow(dimension, start, length) -> Tensor
-
- See :func:`torch.narrow`.
- """
- ...
- @overload
- def narrow(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
- r"""
- narrow(dimension, start, length) -> Tensor
-
- See :func:`torch.narrow`.
- """
- ...
- def narrow_copy(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
- r"""
- narrow_copy(dimension, start, length) -> Tensor
-
- See :func:`torch.narrow_copy`.
- """
- ...
- def ndimension(self) -> _int:
- r"""
- ndimension() -> int
-
- Alias for :meth:`~Tensor.dim()`
- """
- ...
- @overload
- def ne(self, other: Tensor) -> Tensor:
- r"""
- ne(other) -> Tensor
-
- See :func:`torch.ne`.
- """
- ...
- @overload
- def ne(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- ne(other) -> Tensor
-
- See :func:`torch.ne`.
- """
- ...
- @overload
- def ne_(self, other: Tensor) -> Tensor:
- r"""
- ne_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.ne`.
- """
- ...
- @overload
- def ne_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- ne_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.ne`.
- """
- ...
- def neg(self) -> Tensor:
- r"""
- neg() -> Tensor
-
- See :func:`torch.neg`
- """
- ...
- def neg_(self) -> Tensor:
- r"""
- neg_() -> Tensor
-
- In-place version of :meth:`~Tensor.neg`
- """
- ...
- def negative(self) -> Tensor:
- r"""
- negative() -> Tensor
-
- See :func:`torch.negative`
- """
- ...
- def negative_(self) -> Tensor:
- r"""
- negative_() -> Tensor
-
- In-place version of :meth:`~Tensor.negative`
- """
- ...
- def nelement(self) -> _int:
- r"""
- nelement() -> int
-
- Alias for :meth:`~Tensor.numel`
- """
- ...
- @overload
- def new(cls, *args: Any, device: Optional[DeviceLikeType] = None) -> Self: ...
- @overload
- def new(cls, storage: Storage) -> Self: ...
- @overload
- def new(cls, other: Tensor) -> Self: ...
- @overload
- def new(cls, size: _size, *, device: Optional[DeviceLikeType] = None) -> Self: ...
- @overload
- def new_empty(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with uninitialized data.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.ones(())
- >>> tensor.new_empty((2, 3))
- tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
- [ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
- """
- ...
- @overload
- def new_empty(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with uninitialized data.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.ones(())
- >>> tensor.new_empty((2, 3))
- tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
- [ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
- """
- ...
- def new_empty_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
- uninitialized data. By default, the returned Tensor has the same
- :class:`torch.dtype` and :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.ones(())
- >>> tensor.new_empty_strided((2, 3), (3, 1))
- tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
- [ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
- """
- ...
- def new_full(self, size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- fill_value (scalar): the number to fill the output tensor with.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.ones((2,), dtype=torch.float64)
- >>> tensor.new_full((3, 4), 3.141592)
- tensor([[ 3.1416, 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64)
- """
- ...
- @overload
- def new_ones(self, size: _size, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with ``1``.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.tensor((), dtype=torch.int32)
- >>> tensor.new_ones((2, 3))
- tensor([[ 1, 1, 1],
- [ 1, 1, 1]], dtype=torch.int32)
- """
- ...
- @overload
- def new_ones(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with ``1``.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.tensor((), dtype=torch.int32)
- >>> tensor.new_ones((2, 3))
- tensor([[ 1, 1, 1],
- [ 1, 1, 1]], dtype=torch.int32)
- """
- ...
- @overload
- def new_ones(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with ``1``.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.tensor((), dtype=torch.int32)
- >>> tensor.new_ones((2, 3))
- tensor([[ 1, 1, 1],
- [ 1, 1, 1]], dtype=torch.int32)
- """
- ...
- def new_tensor(self, data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a new Tensor with :attr:`data` as the tensor data.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- .. warning::
-
- :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
- ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
- or :func:`torch.Tensor.detach`.
- If you have a numpy array and want to avoid a copy, use
- :func:`torch.from_numpy`.
-
- .. warning::
-
- When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
- and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
- and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
- The equivalents using ``clone()`` and ``detach()`` are recommended.
-
- Args:
- data (array_like): The returned Tensor copies :attr:`data`.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.ones((2,), dtype=torch.int8)
- >>> data = [[0, 1], [2, 3]]
- >>> tensor.new_tensor(data)
- tensor([[ 0, 1],
- [ 2, 3]], dtype=torch.int8)
- """
- ...
- @overload
- def new_zeros(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with ``0``.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.tensor((), dtype=torch.float64)
- >>> tensor.new_zeros((2, 3))
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]], dtype=torch.float64)
- """
- ...
- @overload
- def new_zeros(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
-
-
- Returns a Tensor of size :attr:`size` filled with ``0``.
- By default, the returned Tensor has the same :class:`torch.dtype` and
- :class:`torch.device` as this tensor.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
- Default: if None, same :class:`torch.dtype` as this tensor.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, same :class:`torch.device` as this tensor.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> tensor = torch.tensor((), dtype=torch.float64)
- >>> tensor.new_zeros((2, 3))
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]], dtype=torch.float64)
- """
- ...
def nextafter(self, other: Tensor) -> Tensor:
    r"""
    nextafter(other) -> Tensor

    See :func:`torch.nextafter`
    """
    ...
def nextafter_(self, other: Tensor) -> Tensor:
    r"""
    nextafter_(other) -> Tensor

    In-place version of :meth:`~Tensor.nextafter`
    """
    ...
- @overload
- def nonzero(self, *, as_tuple: Literal[False] = False) -> Tensor:
- r"""
- nonzero() -> LongTensor
-
- See :func:`torch.nonzero`
- """
- ...
- @overload
- def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]:
- r"""
- nonzero() -> LongTensor
-
- See :func:`torch.nonzero`
- """
- ...
def nonzero_static(self, *, size: _int, fill_value: _int = -1) -> Tensor:
    r"""
    nonzero_static(input, *, size, fill_value=-1) -> Tensor

    Returns a 2-D tensor where each row is the index for a non-zero value.
    The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.

    Args:
        input (Tensor): the input tensor to count non-zero elements.

    Keyword args:
        size (int): the size of non-zero elements expected to be included in the out
            tensor. Pad the out tensor with `fill_value` if the `size` is larger
            than total number of non-zero elements, truncate out tensor if `size`
            is smaller. The size must be a non-negative integer.
        fill_value (int): the value to fill the output tensor with when `size` is larger
            than the total number of non-zero elements. Default is `-1` to represent
            invalid index.

    Example:

        # Example 1: Padding
        >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
        >>> static_size = 4
        >>> torch.nonzero_static(input_tensor, size=static_size)
        tensor([[ 0, 0],
                [ 1, 0],
                [ 1, 1],
                [ -1, -1]], dtype=torch.int64)

        # Example 2: Truncating
        >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
        >>> static_size = 2
        >>> torch.nonzero_static(input_tensor, size=static_size)
        tensor([[ 0, 0],
                [ 1, 0]], dtype=torch.int64)

        # Example 3: 0 size
        >>> input_tensor = torch.tensor([10])
        >>> static_size = 0
        >>> torch.nonzero_static(input_tensor, size=static_size)
        tensor([], size=(0, 1), dtype=torch.int64)

        # Example 4: 0 rank input
        >>> input_tensor = torch.tensor(10)
        >>> static_size = 2
        >>> torch.nonzero_static(input_tensor, size=static_size)
        tensor([], size=(2, 0), dtype=torch.int64)
    """
    ...
def normal_(self, mean: _float = 0, std: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
    r"""
    normal_(mean=0, std=1, *, generator=None) -> Tensor

    Fills :attr:`self` tensor with elements sampled from the normal distribution
    parameterized by :attr:`mean` and :attr:`std`.
    """
    ...
- @overload
- def not_equal(self, other: Tensor) -> Tensor:
- r"""
- not_equal(other) -> Tensor
-
- See :func:`torch.not_equal`.
- """
- ...
- @overload
- def not_equal(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- not_equal(other) -> Tensor
-
- See :func:`torch.not_equal`.
- """
- ...
- @overload
- def not_equal_(self, other: Tensor) -> Tensor:
- r"""
- not_equal_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.not_equal`.
- """
- ...
- @overload
- def not_equal_(self, other: Union[Number, _complex]) -> Tensor:
- r"""
- not_equal_(other) -> Tensor
-
- In-place version of :meth:`~Tensor.not_equal`.
- """
- ...
- def numel(self) -> _int:
- r"""
- numel() -> int
-
- See :func:`torch.numel`
- """
- ...
- def numpy(self, *, force: _bool = False) -> numpy.ndarray:
- r"""
- numpy(*, force=False) -> numpy.ndarray
-
- Returns the tensor as a NumPy :class:`ndarray`.
-
- If :attr:`force` is ``False`` (the default), the conversion
- is performed only if the tensor is on the CPU, does not require grad,
- does not have its conjugate bit set, and is a dtype and layout that
- NumPy supports. The returned ndarray and the tensor will share their
- storage, so changes to the tensor will be reflected in the ndarray
- and vice versa.
-
- If :attr:`force` is ``True`` this is equivalent to
- calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
- If the tensor isn't on the CPU or the conjugate or negative bit is set,
- the tensor won't share its storage with the returned ndarray.
- Setting :attr:`force` to ``True`` can be a useful shorthand.
-
- Args:
- force (bool): if ``True``, the ndarray may be a copy of the tensor
- instead of always sharing memory, defaults to ``False``.
- """
- ...
- def orgqr(self, input2: Tensor) -> Tensor:
- r"""
- orgqr(input2) -> Tensor
-
- See :func:`torch.orgqr`
- """
- ...
- def ormqr(self, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False) -> Tensor:
- r"""
- ormqr(input2, input3, left=True, transpose=False) -> Tensor
-
- See :func:`torch.ormqr`
- """
- ...
- def outer(self, vec2: Tensor) -> Tensor:
- r"""
- outer(vec2) -> Tensor
-
- See :func:`torch.outer`.
- """
- ...
- @overload
- def permute(self, dims: _size) -> Tensor:
- r"""
- permute(*dims) -> Tensor
-
- See :func:`torch.permute`
- """
- ...
- @overload
- def permute(self, *dims: _int) -> Tensor:
- r"""
- permute(*dims) -> Tensor
-
- See :func:`torch.permute`
- """
- ...
# NOTE(review): Optional[Optional[...]] below is redundant (== Optional[...]);
# kept byte-identical because this stub is generator output.
def pin_memory(self, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor:
    r"""
    pin_memory(device=None) -> Tensor

    Copies the tensor to pinned memory, if it's not already pinned.

    Args:
        device (torch.device, optional): undocumented upstream; presumably the
            device whose pinned memory to allocate in — TODO confirm against the
            C++ implementation.
    """
    ...
def pinverse(self, rcond: _float = 1e-15) -> Tensor:
    r"""
    pinverse(rcond=1e-15) -> Tensor

    See :func:`torch.pinverse`
    """
    ...
- def polygamma(self, n: _int) -> Tensor:
- r"""
- polygamma(n) -> Tensor
-
- See :func:`torch.polygamma`
- """
- ...
- def polygamma_(self, n: _int) -> Tensor:
- r"""
- polygamma_(n) -> Tensor
-
- In-place version of :meth:`~Tensor.polygamma`
- """
- ...
- def positive(self) -> Tensor:
- r"""
- positive() -> Tensor
-
- See :func:`torch.positive`
- """
- ...
- @overload
- def pow(self, exponent: Tensor) -> Tensor:
- r"""
- pow(exponent) -> Tensor
-
- See :func:`torch.pow`
- """
- ...
- @overload
- def pow(self, exponent: Union[Number, _complex]) -> Tensor:
- r"""
- pow(exponent) -> Tensor
-
- See :func:`torch.pow`
- """
- ...
- @overload
- def pow_(self, exponent: Tensor) -> Tensor:
- r"""
- pow_(exponent) -> Tensor
-
- In-place version of :meth:`~Tensor.pow`
- """
- ...
- @overload
- def pow_(self, exponent: Union[Number, _complex]) -> Tensor:
- r"""
- pow_(exponent) -> Tensor
-
- In-place version of :meth:`~Tensor.pow`
- """
- ...
- def prelu(self, weight: Tensor) -> Tensor: ...
- @overload
- def prod(self, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- prod(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.prod`
- """
- ...
- @overload
- def prod(self, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- prod(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.prod`
- """
- ...
- @overload
- def prod(self, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- prod(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.prod`
- """
- ...
- def put(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor:
- r"""
- put(input, index, source, accumulate=False) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.put_`.
- `input` corresponds to `self` in :meth:`torch.Tensor.put_`.
- """
- ...
def put_(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor:
    r"""
    put_(index, source, accumulate=False) -> Tensor

    Copies the elements from :attr:`source` into the positions specified by
    :attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
    it were a 1-D tensor.

    :attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
    the same shape.

    If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
    :attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
    contains duplicate elements.

    Args:
        index (LongTensor): the indices into self
        source (Tensor): the tensor containing values to copy from
        accumulate (bool): whether to accumulate into self

    Example::

        >>> src = torch.tensor([[4, 3, 5],
        ...                     [6, 7, 8]])
        >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
        tensor([[ 4, 9, 5],
                [ 10, 7, 8]])
    """
    ...
- def q_per_channel_axis(self) -> _int:
- r"""
- q_per_channel_axis() -> int
-
- Given a Tensor quantized by linear (affine) per-channel quantization,
- returns the index of dimension on which per-channel quantization is applied.
- """
- ...
- def q_per_channel_scales(self) -> Tensor:
- r"""
- q_per_channel_scales() -> Tensor
-
- Given a Tensor quantized by linear (affine) per-channel quantization,
- returns a Tensor of scales of the underlying quantizer. It has the number of
- elements that matches the corresponding dimensions (from q_per_channel_axis) of
- the tensor.
- """
- ...
- def q_per_channel_zero_points(self) -> Tensor:
- r"""
- q_per_channel_zero_points() -> Tensor
-
- Given a Tensor quantized by linear (affine) per-channel quantization,
- returns a tensor of zero_points of the underlying quantizer. It has the number of
- elements that matches the corresponding dimensions (from q_per_channel_axis) of
- the tensor.
- """
- ...
def q_scale(self) -> _float:
    r"""
    q_scale() -> float

    Given a Tensor quantized by linear (affine) quantization,
    returns the scale of the underlying quantizer.
    """
    ...
def q_zero_point(self) -> _int:
    r"""
    q_zero_point() -> int

    Given a Tensor quantized by linear (affine) quantization,
    returns the zero_point of the underlying quantizer.
    """
    ...
- def qr(self, some: _bool = True) -> torch.return_types.qr:
- r"""
- qr(some=True) -> (Tensor, Tensor)
-
- See :func:`torch.qr`
- """
- ...
- def qscheme(self) -> _qscheme:
- r"""
- qscheme() -> torch.qscheme
-
- Returns the quantization scheme of a given QTensor.
- """
- ...
- @overload
- def quantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
- r"""
- quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
-
- See :func:`torch.quantile`
- """
- ...
- @overload
- def quantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor:
- r"""
- quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
-
- See :func:`torch.quantile`
- """
- ...
- def rad2deg(self) -> Tensor:
- r"""
- rad2deg() -> Tensor
-
- See :func:`torch.rad2deg`
- """
- ...
- def rad2deg_(self) -> Tensor:
- r"""
- rad2deg_() -> Tensor
-
- In-place version of :meth:`~Tensor.rad2deg`
- """
- ...
- @overload
- def random_(self, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- random_(from=0, to=None, *, generator=None) -> Tensor
-
- Fills :attr:`self` tensor with numbers sampled from the discrete uniform
- distribution over ``[from, to - 1]``. If not specified, the values are usually
- only bounded by :attr:`self` tensor's data type. However, for floating point
- types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
- value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
- will be uniform in ``[0, 2^53]``.
- """
- ...
- @overload
- def random_(self, from_: _int, to: Optional[_int], *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- random_(from=0, to=None, *, generator=None) -> Tensor
-
- Fills :attr:`self` tensor with numbers sampled from the discrete uniform
- distribution over ``[from, to - 1]``. If not specified, the values are usually
- only bounded by :attr:`self` tensor's data type. However, for floating point
- types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
- value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
- will be uniform in ``[0, 2^53]``.
- """
- ...
- @overload
- def random_(self, to: _int, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- random_(from=0, to=None, *, generator=None) -> Tensor
-
- Fills :attr:`self` tensor with numbers sampled from the discrete uniform
- distribution over ``[from, to - 1]``. If not specified, the values are usually
- only bounded by :attr:`self` tensor's data type. However, for floating point
- types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
- value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
- will be uniform in ``[0, 2^53]``.
- """
- ...
def ravel(self) -> Tensor:
    r"""
    ravel() -> Tensor

    See :func:`torch.ravel`
    """
    ...
- def reciprocal(self) -> Tensor:
- r"""
- reciprocal() -> Tensor
-
- See :func:`torch.reciprocal`
- """
- ...
- def reciprocal_(self) -> Tensor:
- r"""
- reciprocal_() -> Tensor
-
- In-place version of :meth:`~Tensor.reciprocal`
- """
- ...
def record_stream(self, s: Stream) -> None:
    r"""
    record_stream(stream)

    Marks the tensor as having been used by this stream. When the tensor
    is deallocated, ensure the tensor memory is not reused for another tensor
    until all work queued on :attr:`stream` at the time of deallocation is
    complete.

    .. note::

        The caching allocator is aware of only the stream where a tensor was
        allocated. Due to the awareness, it already correctly manages the life
        cycle of tensors on only one stream. But if a tensor is used on a stream
        different from the stream of origin, the allocator might reuse the memory
        unexpectedly. Calling this method lets the allocator know which streams
        have used the tensor.

    .. warning::

        This method is most suitable for use cases where you are providing a
        function that created a tensor on a side stream, and want users to be able
        to make use of the tensor without having to think carefully about stream
        safety when making use of them. These safety guarantees come at some
        performance and predictability cost (analogous to the tradeoff between GC
        and manual memory management), so if you are in a situation where
        you manage the full lifetime of your tensors, you may consider instead
        manually managing CUDA events so that calling this method is not necessary.
        In particular, when you call this method, on later allocations the
        allocator will poll the recorded stream to see if all operations have
        completed yet; you can potentially race with side stream computation and
        non-deterministically reuse or fail to reuse memory for an allocation.

        You can safely use tensors allocated on side streams without
        :meth:`~Tensor.record_stream`; you must manually ensure that
        any non-creation stream uses of a tensor are synced back to the creation
        stream before you deallocate the tensor. As the CUDA caching allocator
        guarantees that the memory will only be reused with the same creation stream,
        this is sufficient to ensure that writes to future reallocations of the
        memory will be delayed until non-creation stream uses are done.
        (Counterintuitively, you may observe that on the CPU side we have already
        reallocated the tensor, even though CUDA kernels on the old tensor are
        still in progress. This is fine, because CUDA operations on the new
        tensor will appropriately wait for the old operations to complete, as they
        are all on the same stream.)

    Concretely, this looks like this::

        with torch.cuda.stream(s0):
            x = torch.zeros(N)

        s1.wait_stream(s0)
        with torch.cuda.stream(s1):
            y = some_comm_op(x)

        ... some compute on s0 ...

        # synchronize creation stream s0 to side stream s1
        # before deallocating x
        s0.wait_stream(s1)
        del x

    Note that some discretion is required when deciding when to perform
    ``s0.wait_stream(s1)``. In particular, if we were to wait immediately
    after ``some_comm_op``, there wouldn't be any point in having the side
    stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
    Instead, the synchronization must be placed at some appropriate, later
    point in time where you expect the side stream ``s1`` to have finished
    work. This location is typically identified via profiling, e.g., using
    Chrome traces produced
    :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you
    place the wait too early, work on s0 will block until ``s1`` has finished,
    preventing further overlapping of communication and computation. If you
    place the wait too late, you will use more memory than is strictly
    necessary (as you are keeping ``x`` live for longer.) For a concrete
    example of how this guidance can be applied in practice, see this post:
    `FSDP and CUDACachingAllocator
    <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
    """
    # NOTE(review): the parameter is declared as ``s`` here but documented as
    # ``stream`` — keyword callers must currently write ``s=``; consider
    # renaming for consistency with the docs.
    ...
def refine_names(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor:
    r"""
    refine_names(names) -> Tensor

    Refines the dimension names of :attr:`self` according to :attr:`names`
    (``...`` expands to the unnamed remainder). See the named-tensors
    documentation for :meth:`torch.Tensor.refine_names`.
    """
    ...
def relu(self) -> Tensor:
    r"""
    relu() -> Tensor

    See :func:`torch.relu`
    """
    ...
def relu_(self) -> Tensor:
    r"""
    relu_() -> Tensor

    In-place version of :meth:`~Tensor.relu`
    """
    ...
@overload
def remainder(self, other: Tensor) -> Tensor:
    r"""
    remainder(divisor) -> Tensor

    Overload for a tensor ``divisor``.

    See :func:`torch.remainder`
    """
    ...
@overload
def remainder(self, other: Union[Number, _complex]) -> Tensor:
    r"""
    remainder(divisor) -> Tensor

    Overload for a scalar ``divisor``.

    See :func:`torch.remainder`
    """
    ...
@overload
def remainder_(self, other: Tensor) -> Tensor:
    r"""
    remainder_(divisor) -> Tensor

    Overload for a tensor ``divisor``.

    In-place version of :meth:`~Tensor.remainder`
    """
    ...
@overload
def remainder_(self, other: Union[Number, _complex]) -> Tensor:
    r"""
    remainder_(divisor) -> Tensor

    Overload for a scalar ``divisor``.

    In-place version of :meth:`~Tensor.remainder`
    """
    ...
def rename(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor:
    r"""
    rename(names) -> Tensor

    Renames the dimension names of :attr:`self`; passing ``None`` drops all
    names. See the named-tensors documentation for
    :meth:`torch.Tensor.rename`.
    """
    ...
def rename_(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor:
    r"""
    rename_(names) -> Tensor

    In-place version of :meth:`~Tensor.rename`
    """
    ...
def renorm(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor:
    r"""
    renorm(p, dim, maxnorm) -> Tensor

    Returns a tensor where each sub-tensor along :attr:`dim` whose ``p``-norm
    exceeds :attr:`maxnorm` is rescaled so its norm equals :attr:`maxnorm`.

    See :func:`torch.renorm`
    """
    ...
def renorm_(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor:
    r"""
    renorm_(p, dim, maxnorm) -> Tensor

    In-place version of :meth:`~Tensor.renorm`; modifies :attr:`self` and
    returns it.
    """
    ...
@overload
def repeat(self, repeats: Sequence[Union[_int, SymInt]]) -> Tensor:
    r"""
    repeat(*sizes) -> Tensor

    Repeats this tensor along the specified dimensions.

    (Overload accepting the repeat counts as a single sequence.)

    Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.

    .. warning::

        :meth:`~Tensor.repeat` behaves differently from
        `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
        but is more similar to
        `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
        For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.

    Args:
        sizes (torch.Size or int...): The number of times to repeat this tensor along each
            dimension

    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> x.repeat(4, 2)
        tensor([[ 1,  2,  3,  1,  2,  3],
                [ 1,  2,  3,  1,  2,  3],
                [ 1,  2,  3,  1,  2,  3],
                [ 1,  2,  3,  1,  2,  3]])
        >>> x.repeat(4, 2, 1).size()
        torch.Size([4, 2, 3])
    """
    ...
@overload
def repeat(self, *repeats: _int) -> Tensor:
    r"""
    repeat(*sizes) -> Tensor

    Overload accepting the repeat counts as separate positional ints;
    otherwise identical to the sequence overload.

    See :meth:`torch.Tensor.repeat` for the full documentation.
    """
    ...
@overload
def repeat_interleave(self, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor

    Overload for a tensor ``repeats`` (per-element repeat counts).

    See :func:`torch.repeat_interleave`.
    """
    ...
@overload
def repeat_interleave(self, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
    r"""
    repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor

    Overload for an integer ``repeats`` (same count for every element).

    See :func:`torch.repeat_interleave`.
    """
    ...
def requires_grad_(self, mode: _bool = True) -> Tensor:
    r"""
    requires_grad_(requires_grad=True) -> Tensor

    Change if autograd should record operations on this tensor: sets this tensor's
    :attr:`requires_grad` attribute in-place. Returns this tensor.

    :func:`requires_grad_`'s main use case is to tell autograd to begin recording
    operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
    (because it was obtained through a DataLoader, or required preprocessing or
    initialization), ``tensor.requires_grad_()`` makes it so that autograd will
    begin to record operations on ``tensor``.

    Args:
        requires_grad (bool): If autograd should record operations on this tensor.
            Default: ``True``.

    Example::

        >>> # Let's say we want to preprocess some saved weights and use
        >>> # the result as new weights.
        >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
        >>> loaded_weights = torch.tensor(saved_weights)
        >>> weights = preprocess(loaded_weights)  # some function
        >>> weights
        tensor([-0.5503,  0.4926, -2.1158, -0.8303])

        >>> # Now, start to record operations done to weights
        >>> weights.requires_grad_()
        >>> out = weights.pow(2).sum()
        >>> out.backward()
        >>> weights.grad
        tensor([-1.1007,  0.9853, -4.2316, -1.6606])
    """
    # NOTE(review): the stub names this parameter ``mode`` while the docs call
    # it ``requires_grad`` — keyword callers must write ``mode=``; consider
    # renaming to match the documented signature.
    ...
@overload
def reshape(self, shape: Sequence[Union[_int, SymInt]]) -> Tensor:
    r"""
    reshape(*shape) -> Tensor

    Returns a tensor with the same data and number of elements as :attr:`self`
    but with the specified shape. This method returns a view if :attr:`shape` is
    compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
    possible to return a view.

    (Overload accepting the shape as a single sequence.)

    See :func:`torch.reshape`

    Args:
        shape (tuple of ints or int...): the desired shape
    """
    ...
@overload
def reshape(self, *shape: _int) -> Tensor:
    r"""
    reshape(*shape) -> Tensor

    Overload accepting the shape as separate positional ints; otherwise
    identical to the sequence overload.

    See :func:`torch.reshape`

    Args:
        shape (tuple of ints or int...): the desired shape
    """
    ...
def reshape_as(self, other: Tensor) -> Tensor:
    r"""
    reshape_as(other) -> Tensor

    Returns this tensor as the same shape as :attr:`other`.
    ``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
    This method returns a view if ``other.sizes()`` is compatible with the current
    shape. See :meth:`torch.Tensor.view` on when it is possible to return a view
    (otherwise the data is copied).

    Please see :meth:`reshape` for more information about ``reshape``.

    Args:
        other (:class:`torch.Tensor`): The result tensor has the same shape
            as :attr:`other`.
    """
    ...
@overload
def resize_(self, size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None) -> Tensor:
    r"""
    resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor

    Resizes :attr:`self` tensor to the specified size. If the number of elements is
    larger than the current storage size, then the underlying storage is resized
    to fit the new number of elements. If the number of elements is smaller, the
    underlying storage is not changed. Existing elements are preserved but any new
    memory is uninitialized.

    (Overload accepting the size as a single sequence.)

    .. warning::

        This is a low-level method. The storage is reinterpreted as C-contiguous,
        ignoring the current strides (unless the target size equals the current
        size, in which case the tensor is left unchanged). For most purposes, you
        will instead want to use :meth:`~Tensor.view()`, which checks for
        contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
        change the size in-place with custom strides, see :meth:`~Tensor.set_()`.

    .. note::

        If :func:`torch.use_deterministic_algorithms()` and
        :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
        ``True``, new elements are initialized to prevent nondeterministic behavior
        from using the result as an input to an operation. Floating point and
        complex values are set to NaN, and integer values are set to the maximum
        value.

    Args:
        sizes (torch.Size or int...): the desired size
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            Tensor. Default: ``torch.contiguous_format``. Note that memory format of
            :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.

    Example::

        >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
        >>> x.resize_(2, 2)
        tensor([[ 1,  2],
                [ 3,  4]])
    """
    ...
@overload
def resize_(self, *size: _int, memory_format: Optional[memory_format] = None) -> Tensor:
    r"""
    resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor

    Overload accepting the size as separate positional ints; otherwise
    identical to the sequence overload.

    See :meth:`torch.Tensor.resize_` for the full documentation.
    """
    ...
def resize_as_(self, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor:
    r"""
    resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor

    Resizes the :attr:`self` tensor to be the same size as the specified
    :attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.

    Args:
        tensor (Tensor): the tensor whose size to match.
        memory_format (:class:`torch.memory_format`, optional): the desired memory format of
            Tensor. Default: ``torch.contiguous_format``. Note that memory format of
            :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
    """
    # NOTE(review): the parameter is declared as ``the_template`` but documented
    # as ``tensor`` — keyword callers must write ``the_template=``.
    ...
def resize_as_sparse_(self, the_template: Tensor) -> Tensor:
    r"""
    resize_as_sparse_(the_template) -> Tensor

    In-place resize of a sparse :attr:`self` to match :attr:`the_template`.
    NOTE(review): semantics inferred from the name and the ``resize_as_``
    sibling — confirm against the ATen implementation.
    """
    ...
def resolve_conj(self) -> Tensor:
    r"""
    resolve_conj() -> Tensor

    Returns a tensor with any lazy conjugation materialized.

    See :func:`torch.resolve_conj`
    """
    ...
def resolve_neg(self) -> Tensor:
    r"""
    resolve_neg() -> Tensor

    Returns a tensor with any lazy negation materialized.

    See :func:`torch.resolve_neg`
    """
    ...
def retain_grad(self) -> None:
    r"""
    retain_grad() -> None

    Enables this Tensor to have its :attr:`grad` populated during
    :func:`backward`. This is a no-op for leaf tensors.
    """
    ...
def roll(self, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor:
    r"""
    roll(shifts, dims=()) -> Tensor

    Rolls the tensor along the given dimension(s); elements shifted past the
    last position wrap around to the first.

    See :func:`torch.roll`
    """
    ...
def rot90(self, k: _int = 1, dims: _size = (0,1)) -> Tensor:
    r"""
    rot90(k=1, dims=(0, 1)) -> Tensor

    Rotates the tensor by 90 degrees :attr:`k` times in the plane specified by
    :attr:`dims`.

    See :func:`torch.rot90`
    """
    ...
@overload
def round(self) -> Tensor:
    r"""
    round(decimals=0) -> Tensor

    Overload with the default ``decimals=0``.

    See :func:`torch.round`
    """
    ...
@overload
def round(self, *, decimals: _int) -> Tensor:
    r"""
    round(decimals=0) -> Tensor

    Overload with an explicit keyword-only ``decimals``.

    See :func:`torch.round`
    """
    ...
@overload
def round_(self) -> Tensor:
    r"""
    round_(decimals=0) -> Tensor

    Overload with the default ``decimals=0``.

    In-place version of :meth:`~Tensor.round`
    """
    ...
@overload
def round_(self, *, decimals: _int) -> Tensor:
    r"""
    round_(decimals=0) -> Tensor

    Overload with an explicit keyword-only ``decimals``.

    In-place version of :meth:`~Tensor.round`
    """
    ...
def row_indices(self) -> Tensor:
    r"""
    row_indices() -> Tensor

    Returns the row-indices component of a sparse compressed tensor.
    NOTE(review): presumably valid only for CSC/BSC layouts (the counterpart
    of ``col_indices`` for CSR) — confirm against the sparse docs.
    """
    ...
def rsqrt(self) -> Tensor:
    r"""
    rsqrt() -> Tensor

    Returns a new tensor with the reciprocal of the square root of each
    element of :attr:`self`.

    See :func:`torch.rsqrt`
    """
    ...
def rsqrt_(self) -> Tensor:
    r"""
    rsqrt_() -> Tensor

    In-place version of :meth:`~Tensor.rsqrt`; modifies :attr:`self` and
    returns it.
    """
    ...
@overload
def scatter(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
    r"""
    scatter(dim, index, src) -> Tensor

    Overload: integer ``dim``, tensor ``src``.

    Out-of-place version of :meth:`torch.Tensor.scatter_`
    """
    ...
@overload
def scatter(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor:
    r"""
    scatter(dim, index, src, *, reduce) -> Tensor

    Overload: integer ``dim``, tensor ``src``, with a ``reduce`` keyword.

    Out-of-place version of :meth:`torch.Tensor.scatter_`
    """
    ...
@overload
def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor:
    r"""
    scatter(dim, index, value, *, reduce) -> Tensor

    Overload: integer ``dim``, scalar ``value``, with a ``reduce`` keyword.

    Out-of-place version of :meth:`torch.Tensor.scatter_`
    """
    ...
@overload
def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
    r"""
    scatter(dim, index, src) -> Tensor

    Overload: named ``dim`` (named-tensor API), tensor ``src``.

    Out-of-place version of :meth:`torch.Tensor.scatter_`
    """
    ...
@overload
def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
    r"""
    scatter(dim, index, value) -> Tensor

    Overload: integer ``dim``, scalar ``value``.

    Out-of-place version of :meth:`torch.Tensor.scatter_`
    """
    ...
@overload
def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
    r"""
    scatter(dim, index, value) -> Tensor

    Overload: named ``dim`` (named-tensor API), scalar ``value``.

    Out-of-place version of :meth:`torch.Tensor.scatter_`
    """
    ...
@overload
def scatter_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
    r"""
    scatter_(dim, index, src, *, reduce=None) -> Tensor

    Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
    specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
    index is specified by its index in :attr:`src` for ``dimension != dim`` and by
    the corresponding value in :attr:`index` for ``dimension = dim``.

    For a 3-D tensor, :attr:`self` is updated as::

        self[index[i][j][k]][j][k] = src[i][j][k]  # if dim == 0
        self[i][index[i][j][k]][k] = src[i][j][k]  # if dim == 1
        self[i][j][index[i][j][k]] = src[i][j][k]  # if dim == 2

    This is the reverse operation of the manner described in :meth:`~Tensor.gather`.

    :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
    the same number of dimensions. It is also required that
    ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
    ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
    Note that ``index`` and ``src`` do not broadcast.

    Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
    between ``0`` and ``self.size(dim) - 1`` inclusive.

    .. warning::

        When indices are not unique, the behavior is non-deterministic (one of the
        values from ``src`` will be picked arbitrarily) and the gradient will be
        incorrect (it will be propagated to all locations in the source that
        correspond to the same index)!

    .. note::

        The backward pass is implemented only for ``src.shape == index.shape``.

    Additionally accepts an optional :attr:`reduce` argument that allows
    specification of an optional reduction operation, which is applied to all
    values in the tensor :attr:`src` into :attr:`self` at the indices
    specified in the :attr:`index`. For each value in :attr:`src`, the reduction
    operation is applied to an index in :attr:`self` which is specified by
    its index in :attr:`src` for ``dimension != dim`` and by the corresponding
    value in :attr:`index` for ``dimension = dim``.

    Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
    is updated as::

        self[index[i][j][k]][j][k] *= src[i][j][k]  # if dim == 0
        self[i][index[i][j][k]][k] *= src[i][j][k]  # if dim == 1
        self[i][j][index[i][j][k]] *= src[i][j][k]  # if dim == 2

    Reducing with the addition operation is the same as using
    :meth:`~torch.Tensor.scatter_add_`.

    .. warning::
        The reduce argument with Tensor ``src`` is deprecated and will be removed in
        a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
        instead for more reduction options.

    Args:
        dim (int): the axis along which to index
        index (LongTensor): the indices of elements to scatter, can be either empty
            or of the same dimensionality as ``src``. When empty, the operation
            returns ``self`` unchanged.
        src (Tensor): the source element(s) to scatter.

    Keyword args:
        reduce (str, optional): reduction operation to apply, can be either
            ``'add'`` or ``'multiply'``.

    Example::

        >>> src = torch.arange(1, 11).reshape((2, 5))
        >>> src
        tensor([[ 1,  2,  3,  4,  5],
                [ 6,  7,  8,  9, 10]])
        >>> index = torch.tensor([[0, 1, 2, 0]])
        >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
        tensor([[1, 0, 0, 4, 0],
                [0, 2, 0, 0, 0],
                [0, 0, 3, 0, 0]])
        >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
        >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
        tensor([[1, 2, 3, 0, 0],
                [6, 7, 0, 0, 8],
                [0, 0, 0, 0, 0]])

        >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
        ...            1.23, reduce='multiply')
        tensor([[2.0000, 2.0000, 2.4600, 2.0000],
                [2.0000, 2.0000, 2.0000, 2.4600]])
        >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
        ...            1.23, reduce='add')
        tensor([[2.0000, 2.0000, 3.2300, 2.0000],
                [2.0000, 2.0000, 2.0000, 3.2300]])

    .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
       :noindex:

    Writes the value from :attr:`value` into :attr:`self` at the indices
    specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
    with the :attr:`src` tensor filled entirely with :attr:`value`.

    Args:
        dim (int): the axis along which to index
        index (LongTensor): the indices of elements to scatter, can be either empty
            or of the same dimensionality as ``src``. When empty, the operation
            returns ``self`` unchanged.
        value (Scalar): the value to scatter.

    Keyword args:
        reduce (str, optional): reduction operation to apply, can be either
            ``'add'`` or ``'multiply'``.

    Example::

        >>> index = torch.tensor([[0, 1]])
        >>> value = 2
        >>> torch.zeros(3, 5).scatter_(0, index, value)
        tensor([[2., 0., 0., 0., 0.],
                [0., 2., 0., 0., 0.],
                [0., 0., 0., 0., 0.]])
    """
    ...
@overload
def scatter_(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor:
    r"""
    scatter_(dim, index, src, *, reduce) -> Tensor

    Overload: tensor ``src`` with a ``reduce`` keyword (``'add'`` or
    ``'multiply'``). Behaves as documented on the first overload of
    ``scatter_`` above; see :meth:`torch.Tensor.scatter_`.
    """
    ...
@overload
def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor:
    r"""
    scatter_(dim, index, value, *, reduce) -> Tensor

    Overload: scalar ``value`` with a ``reduce`` keyword; equivalent to the
    tensor-``src`` form with ``src`` filled entirely with ``value``.
    See :meth:`torch.Tensor.scatter_` for the full documentation.
    """
    ...
- @overload
- def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- scatter_(dim, index, src, *, reduce=None) -> Tensor
-
- Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
- specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
- index is specified by its index in :attr:`src` for ``dimension != dim`` and by
- the corresponding value in :attr:`index` for ``dimension = dim``.
-
- For a 3-D tensor, :attr:`self` is updated as::
-
- self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
- self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
- self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
-
- This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
-
- :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
- the same number of dimensions. It is also required that
- ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
- ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
- Note that ``index`` and ``src`` do not broadcast.
-
- Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
- between ``0`` and ``self.size(dim) - 1`` inclusive.
-
- .. warning::
-
- When indices are not unique, the behavior is non-deterministic (one of the
- values from ``src`` will be picked arbitrarily) and the gradient will be
- incorrect (it will be propagated to all locations in the source that
- correspond to the same index)!
-
- .. note::
-
- The backward pass is implemented only for ``src.shape == index.shape``.
-
- Additionally accepts an optional :attr:`reduce` argument that allows
- specification of an optional reduction operation, which is applied to all
- values in the tensor :attr:`src` into :attr:`self` at the indices
- specified in the :attr:`index`. For each value in :attr:`src`, the reduction
- operation is applied to an index in :attr:`self` which is specified by
- its index in :attr:`src` for ``dimension != dim`` and by the corresponding
- value in :attr:`index` for ``dimension = dim``.
-
- Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
- is updated as::
-
- self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
- self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
- self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
-
- Reducing with the addition operation is the same as using
- :meth:`~torch.Tensor.scatter_add_`.
-
- .. warning::
- The reduce argument with Tensor ``src`` is deprecated and will be removed in
- a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
- instead for more reduction options.
-
- Args:
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to scatter, can be either empty
- or of the same dimensionality as ``src``. When empty, the operation
- returns ``self`` unchanged.
- src (Tensor): the source element(s) to scatter.
-
- Keyword args:
- reduce (str, optional): reduction operation to apply, can be either
- ``'add'`` or ``'multiply'``.
-
- Example::
-
- >>> src = torch.arange(1, 11).reshape((2, 5))
- >>> src
- tensor([[ 1, 2, 3, 4, 5],
- [ 6, 7, 8, 9, 10]])
- >>> index = torch.tensor([[0, 1, 2, 0]])
- >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
- tensor([[1, 0, 0, 4, 0],
- [0, 2, 0, 0, 0],
- [0, 0, 3, 0, 0]])
- >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
- >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
- tensor([[1, 2, 3, 0, 0],
- [6, 7, 0, 0, 8],
- [0, 0, 0, 0, 0]])
-
- >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
- ... 1.23, reduce='multiply')
- tensor([[2.0000, 2.0000, 2.4600, 2.0000],
- [2.0000, 2.0000, 2.0000, 2.4600]])
- >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
- ... 1.23, reduce='add')
- tensor([[2.0000, 2.0000, 3.2300, 2.0000],
- [2.0000, 2.0000, 2.0000, 3.2300]])
-
- .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor
- :noindex:
-
- Writes the value from :attr:`value` into :attr:`self` at the indices
- specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
- with the :attr:`src` tensor filled entirely with :attr:`value`.
-
- Args:
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to scatter, can be either empty
- or of the same dimensionality as ``src``. When empty, the operation
- returns ``self`` unchanged.
- value (Scalar): the value to scatter.
-
- Keyword args:
- reduce (str, optional): reduction operation to apply, can be either
- ``'add'`` or ``'multiply'``.
-
- Example::
-
- >>> index = torch.tensor([[0, 1]])
- >>> value = 2
- >>> torch.zeros(3, 5).scatter_(0, index, value)
- tensor([[2., 0., 0., 0., 0.],
- [0., 2., 0., 0., 0.],
- [0., 0., 0., 0., 0.]])
- """
- ...
- @overload
- def scatter_add(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
- r"""
- scatter_add(dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_add_`
- """
- ...
- @overload
- def scatter_add(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
- r"""
- scatter_add(dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_add_`
- """
- ...
- def scatter_add_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor:
- r"""
- scatter_add_(dim, index, src) -> Tensor
-
- Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
- specified in the :attr:`index` tensor in a similar fashion as
- :meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
- an index in :attr:`self` which is specified by its index in :attr:`src`
- for ``dimension != dim`` and by the corresponding value in :attr:`index` for
- ``dimension = dim``.
-
- For a 3-D tensor, :attr:`self` is updated as::
-
- self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
- self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
- self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
-
- :attr:`self`, :attr:`index` and :attr:`src` should have same number of
- dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
- dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
- ``d != dim``. Note that ``index`` and ``src`` do not broadcast.
-
- Note:
- This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
-
- .. note::
-
- The backward pass is implemented only for ``src.shape == index.shape``.
-
- Args:
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to scatter and add, can be
- either empty or of the same dimensionality as ``src``. When empty, the
- operation returns ``self`` unchanged.
- src (Tensor): the source elements to scatter and add
-
- Example::
-
- >>> src = torch.ones((2, 5))
- >>> index = torch.tensor([[0, 1, 2, 0, 0]])
- >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
- tensor([[1., 0., 0., 1., 1.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.]])
- >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
- >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
- tensor([[2., 0., 0., 1., 1.],
- [0., 2., 0., 0., 0.],
- [0., 0., 2., 1., 1.]])
- """
- ...
- def scatter_reduce(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor:
- r"""
- scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
- """
- ...
- def scatter_reduce_(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor:
- r"""
- scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
-
- Reduces all values from the :attr:`src` tensor to the indices specified in
- the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
- defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
- :obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
- index in :attr:`self` which is specified by its index in :attr:`src` for
- ``dimension != dim`` and by the corresponding value in :attr:`index` for
- ``dimension = dim``. If :obj:`include_self=True`, the values in the :attr:`self`
- tensor are included in the reduction.
-
- :attr:`self`, :attr:`index` and :attr:`src` should all have
- the same number of dimensions. It is also required that
- ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
- ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
- Note that ``index`` and ``src`` do not broadcast.
-
- For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
- output is given as::
-
- self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
- self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
- self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
-
- Note:
- This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
-
- .. note::
-
- The backward pass is implemented only for ``src.shape == index.shape``.
-
- .. warning::
-
- This function is in beta and may change in the near future.
-
- Args:
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to scatter and reduce.
- src (Tensor): the source elements to scatter and reduce
- reduce (str): the reduction operation to apply for non-unique indices
- (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
- include_self (bool): whether elements from the :attr:`self` tensor are
- included in the reduction
-
- Example::
-
- >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
- >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
- >>> input = torch.tensor([1., 2., 3., 4.])
- >>> input.scatter_reduce(0, index, src, reduce="sum")
- tensor([5., 14., 8., 4.])
- >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
- tensor([4., 12., 5., 4.])
- >>> input2 = torch.tensor([5., 4., 3., 2.])
- >>> input2.scatter_reduce(0, index, src, reduce="amax")
- tensor([5., 6., 5., 2.])
- >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
- tensor([3., 6., 5., 2.])
- """
- ...
- @overload
- def select(self, dim: _int, index: Union[_int, SymInt]) -> Tensor:
- r"""
- select(dim, index) -> Tensor
-
- See :func:`torch.select`
- """
- ...
- @overload
- def select(self, dim: Union[str, ellipsis, None], index: _int) -> Tensor:
- r"""
- select(dim, index) -> Tensor
-
- See :func:`torch.select`
- """
- ...
- def select_scatter(self, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
- r"""
- select_scatter(src, dim, index) -> Tensor
-
- See :func:`torch.select_scatter`
- """
- ...
- @overload
- def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage], offset: _int, size: _size, stride: _size) -> Tensor:
- r"""
- set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
-
- Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
- :attr:`self` tensor will share the same storage and have the same size and
- strides as :attr:`source`. Changes to elements in one tensor will be reflected
- in the other.
-
- If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
- storage, offset, size, and stride.
-
- Args:
- source (Tensor or Storage): the tensor or storage to use
- storage_offset (int, optional): the offset in the storage
- size (torch.Size, optional): the desired size. Defaults to the size of the source.
- stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
- """
- ...
- @overload
- def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage]) -> Tensor:
- r"""
- set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
-
- Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
- :attr:`self` tensor will share the same storage and have the same size and
- strides as :attr:`source`. Changes to elements in one tensor will be reflected
- in the other.
-
- If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
- storage, offset, size, and stride.
-
- Args:
- source (Tensor or Storage): the tensor or storage to use
- storage_offset (int, optional): the offset in the storage
- size (torch.Size, optional): the desired size. Defaults to the size of the source.
- stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
- """
- ...
- def sgn(self) -> Tensor:
- r"""
- sgn() -> Tensor
-
- See :func:`torch.sgn`
- """
- ...
- def sgn_(self) -> Tensor:
- r"""
- sgn_() -> Tensor
-
- In-place version of :meth:`~Tensor.sgn`
- """
- ...
- def short(self) -> Tensor:
- r"""
- short(memory_format=torch.preserve_format) -> Tensor
-
- ``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- def sigmoid(self) -> Tensor:
- r"""
- sigmoid() -> Tensor
-
- See :func:`torch.sigmoid`
- """
- ...
- def sigmoid_(self) -> Tensor:
- r"""
- sigmoid_() -> Tensor
-
- In-place version of :meth:`~Tensor.sigmoid`
- """
- ...
- def sign(self) -> Tensor:
- r"""
- sign() -> Tensor
-
- See :func:`torch.sign`
- """
- ...
- def sign_(self) -> Tensor:
- r"""
- sign_() -> Tensor
-
- In-place version of :meth:`~Tensor.sign`
- """
- ...
- def signbit(self) -> Tensor:
- r"""
- signbit() -> Tensor
-
- See :func:`torch.signbit`
- """
- ...
- def sin(self) -> Tensor:
- r"""
- sin() -> Tensor
-
- See :func:`torch.sin`
- """
- ...
- def sin_(self) -> Tensor:
- r"""
- sin_() -> Tensor
-
- In-place version of :meth:`~Tensor.sin`
- """
- ...
- def sinc(self) -> Tensor:
- r"""
- sinc() -> Tensor
-
- See :func:`torch.sinc`
- """
- ...
- def sinc_(self) -> Tensor:
- r"""
- sinc_() -> Tensor
-
- In-place version of :meth:`~Tensor.sinc`
- """
- ...
- def sinh(self) -> Tensor:
- r"""
- sinh() -> Tensor
-
- See :func:`torch.sinh`
- """
- ...
- def sinh_(self) -> Tensor:
- r"""
- sinh_() -> Tensor
-
- In-place version of :meth:`~Tensor.sinh`
- """
- ...
- @overload
- def size(self, dim: None = None) -> Size:
- r"""
- size(dim=None) -> torch.Size or int
-
- Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
- the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
- If ``dim`` is specified, returns an int holding the size of that dimension.
-
- Args:
- dim (int, optional): The dimension for which to retrieve the size.
-
- Example::
-
- >>> t = torch.empty(3, 4, 5)
- >>> t.size()
- torch.Size([3, 4, 5])
- >>> t.size(dim=1)
- 4
- """
- ...
- @overload
- def size(self, dim: _int) -> _int:
- r"""
- size(dim=None) -> torch.Size or int
-
- Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
- the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
- If ``dim`` is specified, returns an int holding the size of that dimension.
-
- Args:
- dim (int, optional): The dimension for which to retrieve the size.
-
- Example::
-
- >>> t = torch.empty(3, 4, 5)
- >>> t.size()
- torch.Size([3, 4, 5])
- >>> t.size(dim=1)
- 4
- """
- ...
- def slice_inverse(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ...
- def slice_scatter(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor:
- r"""
- slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
-
- See :func:`torch.slice_scatter`
- """
- ...
- def slogdet(self) -> torch.return_types.slogdet:
- r"""
- slogdet() -> (Tensor, Tensor)
-
- See :func:`torch.slogdet`
- """
- ...
- def smm(self, mat2: Tensor) -> Tensor:
- r"""
- smm(mat) -> Tensor
-
- See :func:`torch.smm`
- """
- ...
- @overload
- def softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- softmax(dim) -> Tensor
-
- Alias for :func:`torch.nn.functional.softmax`.
- """
- ...
- @overload
- def softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- softmax(dim) -> Tensor
-
- Alias for :func:`torch.nn.functional.softmax`.
- """
- ...
- @overload
- def sort(self, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False) -> torch.return_types.sort:
- r"""
- sort(dim=-1, descending=False) -> (Tensor, LongTensor)
-
- See :func:`torch.sort`
- """
- ...
- @overload
- def sort(self, dim: _int = -1, descending: _bool = False) -> torch.return_types.sort:
- r"""
- sort(dim=-1, descending=False) -> (Tensor, LongTensor)
-
- See :func:`torch.sort`
- """
- ...
- @overload
- def sort(self, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort:
- r"""
- sort(dim=-1, descending=False) -> (Tensor, LongTensor)
-
- See :func:`torch.sort`
- """
- ...
- @overload
- def sort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort:
- r"""
- sort(dim=-1, descending=False) -> (Tensor, LongTensor)
-
- See :func:`torch.sort`
- """
- ...
- def sparse_dim(self) -> _int:
- r"""
- sparse_dim() -> int
-
- Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
-
- .. note::
- Returns ``0`` if :attr:`self` is not a sparse tensor.
-
- See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
- """
- ...
- def sparse_mask(self, mask: Tensor) -> Tensor:
- r"""
- sparse_mask(mask) -> Tensor
-
- Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
- strided tensor :attr:`self` filtered by the indices of the sparse
- tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
- ignored. :attr:`self` and :attr:`mask` tensors must have the same
- shape.
-
- .. note::
-
- The returned sparse tensor might contain duplicate values if :attr:`mask`
- is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
- if such behavior is not desired.
-
- .. note::
-
- The returned sparse tensor has the same indices as the sparse tensor
- :attr:`mask`, even when the corresponding values in :attr:`self` are
- zeros.
-
- Args:
- mask (Tensor): a sparse tensor whose indices are used as a filter
-
- Example::
-
- >>> nse = 5
- >>> dims = (5, 5, 2, 2)
- >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
- ... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
- >>> V = torch.randn(nse, dims[2], dims[3])
- >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
- >>> D = torch.randn(dims)
- >>> D.sparse_mask(S)
- tensor(indices=tensor([[0, 0, 0, 2],
- [0, 1, 4, 3]]),
- values=tensor([[[ 1.6550, 0.2397],
- [-0.1611, -0.0779]],
-
- [[ 0.2326, -1.0558],
- [ 1.4711, 1.9678]],
-
- [[-0.5138, -0.0411],
- [ 1.9417, 0.5158]],
-
- [[ 0.0793, 0.0036],
- [-0.2569, -0.1055]]]),
- size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
- """
- ...
- def sparse_resize_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor:
- r"""
- sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
-
- Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
- size and the number of sparse and dense dimensions.
-
- .. note::
- If the number of specified elements in :attr:`self` is zero, then
- :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
- size and positive integers such that ``len(size) == sparse_dim +
- dense_dim``.
-
- If :attr:`self` specifies one or more elements, however, then each
- dimension in :attr:`size` must not be smaller than the corresponding
- dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
- of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
- equal the number of dense dimensions in :attr:`self`.
-
- .. warning::
- Throws an error if :attr:`self` is not a sparse tensor.
-
- Args:
- size (torch.Size): the desired size. If :attr:`self` is non-empty
- sparse tensor, the desired size cannot be smaller than the
- original size.
- sparse_dim (int): the number of sparse dimensions
- dense_dim (int): the number of dense dimensions
- """
- ...
- def sparse_resize_and_clear_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor:
- r"""
- sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
-
- Removes all specified elements from a :ref:`sparse tensor
- <sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
- size and the number of sparse and dense dimensions.
-
- .. warning::
- Throws an error if :attr:`self` is not a sparse tensor.
-
- Args:
- size (torch.Size): the desired size.
- sparse_dim (int): the number of sparse dimensions
- dense_dim (int): the number of dense dimensions
- """
- ...
- @overload
- def split(self, split_size: _int, dim: _int = 0) -> Sequence[Tensor]: ...
- @overload
- def split(self, split_size: Tuple[_int, ...], dim: _int = 0) -> Sequence[Tensor]: ...
- def split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
- def sqrt(self) -> Tensor:
- r"""
- sqrt() -> Tensor
-
- See :func:`torch.sqrt`
- """
- ...
- def sqrt_(self) -> Tensor:
- r"""
- sqrt_() -> Tensor
-
- In-place version of :meth:`~Tensor.sqrt`
- """
- ...
- def square(self) -> Tensor:
- r"""
- square() -> Tensor
-
- See :func:`torch.square`
- """
- ...
- def square_(self) -> Tensor:
- r"""
- square_() -> Tensor
-
- In-place version of :meth:`~Tensor.square`
- """
- ...
- @overload
- def squeeze(self) -> Tensor:
- r"""
- squeeze(dim=None) -> Tensor
-
- See :func:`torch.squeeze`
- """
- ...
- @overload
- def squeeze(self, dim: _int) -> Tensor:
- r"""
- squeeze(dim=None) -> Tensor
-
- See :func:`torch.squeeze`
- """
- ...
- @overload
- def squeeze(self, dim: _size) -> Tensor:
- r"""
- squeeze(dim=None) -> Tensor
-
- See :func:`torch.squeeze`
- """
- ...
- @overload
- def squeeze(self, *dim: _int) -> Tensor:
- r"""
- squeeze(dim=None) -> Tensor
-
- See :func:`torch.squeeze`
- """
- ...
- @overload
- def squeeze(self, dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- squeeze(dim=None) -> Tensor
-
- See :func:`torch.squeeze`
- """
- ...
- @overload
- def squeeze_(self) -> Tensor:
- r"""
- squeeze_(dim=None) -> Tensor
-
- In-place version of :meth:`~Tensor.squeeze`
- """
- ...
- @overload
- def squeeze_(self, dim: _int) -> Tensor:
- r"""
- squeeze_(dim=None) -> Tensor
-
- In-place version of :meth:`~Tensor.squeeze`
- """
- ...
- @overload
- def squeeze_(self, dim: _size) -> Tensor:
- r"""
- squeeze_(dim=None) -> Tensor
-
- In-place version of :meth:`~Tensor.squeeze`
- """
- ...
- @overload
- def squeeze_(self, *dim: _int) -> Tensor:
- r"""
- squeeze_(dim=None) -> Tensor
-
- In-place version of :meth:`~Tensor.squeeze`
- """
- ...
- @overload
- def squeeze_(self, dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- squeeze_(dim=None) -> Tensor
-
- In-place version of :meth:`~Tensor.squeeze`
- """
- ...
- def sspaddmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
-
- See :func:`torch.sspaddmm`
- """
- ...
- @overload
- def std(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
- r"""
- std(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.std`
- """
- ...
- @overload
- def std(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
- r"""
- std(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.std`
- """
- ...
- @overload
- def std(self, unbiased: _bool = True) -> Tensor:
- r"""
- std(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.std`
- """
- ...
- @overload
- def std(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
- r"""
- std(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.std`
- """
- ...
- @overload
- def std(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
- r"""
- std(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.std`
- """
- ...
- def untyped_storage(self) -> UntypedStorage: ...
- def storage_offset(self) -> _int:
- r"""
- storage_offset() -> int
-
- Returns :attr:`self` tensor's offset in the underlying storage in terms of
- number of storage elements (not bytes).
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3, 4, 5])
- >>> x.storage_offset()
- 0
- >>> x[3:].storage_offset()
- 3
- """
- ...
- def storage_type(self) -> Storage: ...
- @overload
- def stride(self, dim: None = None) -> Tuple[_int, ...]:
- r"""
- stride(dim=None) -> tuple or int
-
- Returns the stride of :attr:`self` tensor.
-
- Stride is the jump necessary to go from one element to the next one in the
- specified dimension :attr:`dim`. A tuple of all strides is returned when no
- argument is passed in. Otherwise, an integer value is returned as the stride in
- the particular dimension :attr:`dim`.
-
- Args:
- dim (int, optional): the desired dimension in which stride is required
-
- Example::
-
- >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
- >>> x.stride()
- (5, 1)
- >>> x.stride(0)
- 5
- >>> x.stride(-1)
- 1
- """
- ...
- @overload
- def stride(self, dim: _int) -> _int:
- r"""
- stride(dim=None) -> tuple or int
-
- Returns the stride of :attr:`self` tensor.
-
- Stride is the jump necessary to go from one element to the next one in the
- specified dimension :attr:`dim`. A tuple of all strides is returned when no
- argument is passed in. Otherwise, an integer value is returned as the stride in
- the particular dimension :attr:`dim`.
-
- Args:
- dim (int, optional): the desired dimension in which stride is required
-
- Example::
-
- >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
- >>> x.stride()
- (5, 1)
- >>> x.stride(0)
- 5
- >>> x.stride(-1)
- 1
- """
- ...
- def sub(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sub(other, *, alpha=1) -> Tensor
-
- See :func:`torch.sub`.
- """
- ...
- def sub_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1) -> Tensor:
- r"""
- sub_(other, *, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.sub`
- """
- ...
- @overload
- def subtract(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- subtract(other, *, alpha=1) -> Tensor
-
- See :func:`torch.subtract`.
- """
- ...
- @overload
- def subtract(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- subtract(other, *, alpha=1) -> Tensor
-
- See :func:`torch.subtract`.
- """
- ...
- @overload
- def subtract_(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- subtract_(other, *, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.subtract`.
- """
- ...
- @overload
- def subtract_(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- subtract_(other, *, alpha=1) -> Tensor
-
- In-place version of :meth:`~Tensor.subtract`.
- """
- ...
- @overload
- def sum(self, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- sum(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.sum`
- """
- ...
- @overload
- def sum(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- sum(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.sum`
- """
- ...
- @overload
- def sum(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- sum(dim=None, keepdim=False, dtype=None) -> Tensor
-
- See :func:`torch.sum`
- """
- ...
- @overload
- def sum_to_size(self, size: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- sum_to_size(*size) -> Tensor
-
- Sum ``this`` tensor to :attr:`size`.
- :attr:`size` must be broadcastable to ``this`` tensor size.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- """
- ...
- @overload
- def sum_to_size(self, *size: _int) -> Tensor:
- r"""
- sum_to_size(*size) -> Tensor
-
- Sum ``this`` tensor to :attr:`size`.
- :attr:`size` must be broadcastable to ``this`` tensor size.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- """
- ...
- def svd(self, some: _bool = True, compute_uv: _bool = True) -> torch.return_types.svd:
- r"""
- svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
-
- See :func:`torch.svd`
- """
- ...
- def swapaxes(self, axis0: _int, axis1: _int) -> Tensor:
- r"""
- swapaxes(axis0, axis1) -> Tensor
-
- See :func:`torch.swapaxes`
- """
- ...
- def swapaxes_(self, axis0: _int, axis1: _int) -> Tensor:
- r"""
- swapaxes_(axis0, axis1) -> Tensor
-
- In-place version of :meth:`~Tensor.swapaxes`
- """
- ...
- def swapdims(self, dim0: _int, dim1: _int) -> Tensor:
- r"""
- swapdims(dim0, dim1) -> Tensor
-
- See :func:`torch.swapdims`
- """
- ...
- def swapdims_(self, dim0: _int, dim1: _int) -> Tensor:
- r"""
- swapdims_(dim0, dim1) -> Tensor
-
- In-place version of :meth:`~Tensor.swapdims`
- """
- ...
- def t(self) -> Tensor:
- r"""
- t() -> Tensor
-
- See :func:`torch.t`
- """
- ...
- def t_(self) -> Tensor:
- r"""
- t_() -> Tensor
-
- In-place version of :meth:`~Tensor.t`
- """
- ...
- def take(self, index: Tensor) -> Tensor:
- r"""
- take(indices) -> Tensor
-
- See :func:`torch.take`
- """
- ...
- def take_along_dim(self, indices: Tensor, dim: Optional[_int] = None) -> Tensor:
- r"""
- take_along_dim(indices, dim) -> Tensor
-
- See :func:`torch.take_along_dim`
- """
- ...
- def tan(self) -> Tensor:
- r"""
- tan() -> Tensor
-
- See :func:`torch.tan`
- """
- ...
- def tan_(self) -> Tensor:
- r"""
- tan_() -> Tensor
-
- In-place version of :meth:`~Tensor.tan`
- """
- ...
- def tanh(self) -> Tensor:
- r"""
- tanh() -> Tensor
-
- See :func:`torch.tanh`
- """
- ...
- def tanh_(self) -> Tensor:
- r"""
- tanh_() -> Tensor
-
- In-place version of :meth:`~Tensor.tanh`
- """
- ...
- @overload
- def tensor_split(self, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- tensor_split(indices_or_sections, dim=0) -> List of Tensors
-
- See :func:`torch.tensor_split`
- """
- ...
- @overload
- def tensor_split(self, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- tensor_split(indices_or_sections, dim=0) -> List of Tensors
-
- See :func:`torch.tensor_split`
- """
- ...
- @overload
- def tensor_split(self, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- tensor_split(indices_or_sections, dim=0) -> List of Tensors
-
- See :func:`torch.tensor_split`
- """
- ...
- @overload
- def tile(self, dims: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- tile(dims) -> Tensor
-
- See :func:`torch.tile`
- """
- ...
- @overload
- def tile(self, *dims: _int) -> Tensor:
- r"""
- tile(dims) -> Tensor
-
- See :func:`torch.tile`
- """
- ...
- @overload
- def to(self, dtype: _dtype, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor:
- r"""
- to(*args, **kwargs) -> Tensor
-
- Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
- inferred from the arguments of ``self.to(*args, **kwargs)``.
-
- .. note::
-
- If the ``self`` Tensor already
- has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
- Otherwise, the returned tensor is a copy of ``self`` with the desired
- :class:`torch.dtype` and :class:`torch.device`.
-
- Here are the ways to call ``to``:
-
- .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
- :noindex:
-
- Returns a Tensor with the specified :attr:`dtype`
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
- :noindex:
-
- Returns a Tensor with the specified :attr:`device` and (optional)
- :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
- When :attr:`non_blocking`, tries to convert asynchronously with respect to
- the host if possible, e.g., converting a CPU Tensor with pinned memory to a
- CUDA Tensor.
- When :attr:`copy` is set, a new Tensor is created even when the Tensor
- already matches the desired conversion.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- .. method:: to(other, non_blocking=False, copy=False) -> Tensor
- :noindex:
-
- Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
- the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
- asynchronously with respect to the host if possible, e.g., converting a CPU
- Tensor with pinned memory to a CUDA Tensor.
- When :attr:`copy` is set, a new Tensor is created even when the Tensor
- already matches the desired conversion.
-
- Example::
-
- >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
- >>> tensor.to(torch.float64)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64)
-
- >>> cuda0 = torch.device('cuda:0')
- >>> tensor.to(cuda0)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], device='cuda:0')
-
- >>> tensor.to(cuda0, dtype=torch.float64)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
-
- >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
- >>> tensor.to(other, non_blocking=True)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
- """
- ...
- @overload
- def to(self, device: Optional[DeviceLikeType] = None, dtype: Optional[_dtype] = None, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor:
- r"""
- to(*args, **kwargs) -> Tensor
-
- Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
- inferred from the arguments of ``self.to(*args, **kwargs)``.
-
- .. note::
-
- If the ``self`` Tensor already
- has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
- Otherwise, the returned tensor is a copy of ``self`` with the desired
- :class:`torch.dtype` and :class:`torch.device`.
-
- Here are the ways to call ``to``:
-
- .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
- :noindex:
-
- Returns a Tensor with the specified :attr:`dtype`
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
- :noindex:
-
- Returns a Tensor with the specified :attr:`device` and (optional)
- :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
- When :attr:`non_blocking`, tries to convert asynchronously with respect to
- the host if possible, e.g., converting a CPU Tensor with pinned memory to a
- CUDA Tensor.
- When :attr:`copy` is set, a new Tensor is created even when the Tensor
- already matches the desired conversion.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- .. method:: to(other, non_blocking=False, copy=False) -> Tensor
- :noindex:
-
- Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
- the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
- asynchronously with respect to the host if possible, e.g., converting a CPU
- Tensor with pinned memory to a CUDA Tensor.
- When :attr:`copy` is set, a new Tensor is created even when the Tensor
- already matches the desired conversion.
-
- Example::
-
- >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
- >>> tensor.to(torch.float64)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64)
-
- >>> cuda0 = torch.device('cuda:0')
- >>> tensor.to(cuda0)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], device='cuda:0')
-
- >>> tensor.to(cuda0, dtype=torch.float64)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
-
- >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
- >>> tensor.to(other, non_blocking=True)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
- """
- ...
- @overload
- def to(self, other: Tensor, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor:
- r"""
- to(*args, **kwargs) -> Tensor
-
- Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
- inferred from the arguments of ``self.to(*args, **kwargs)``.
-
- .. note::
-
- If the ``self`` Tensor already
- has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
- Otherwise, the returned tensor is a copy of ``self`` with the desired
- :class:`torch.dtype` and :class:`torch.device`.
-
- Here are the ways to call ``to``:
-
- .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
- :noindex:
-
- Returns a Tensor with the specified :attr:`dtype`
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
- :noindex:
-
- Returns a Tensor with the specified :attr:`device` and (optional)
- :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
- When :attr:`non_blocking`, tries to convert asynchronously with respect to
- the host if possible, e.g., converting a CPU Tensor with pinned memory to a
- CUDA Tensor.
- When :attr:`copy` is set, a new Tensor is created even when the Tensor
- already matches the desired conversion.
-
- Args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- .. method:: to(other, non_blocking=False, copy=False) -> Tensor
- :noindex:
-
- Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
- the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
- asynchronously with respect to the host if possible, e.g., converting a CPU
- Tensor with pinned memory to a CUDA Tensor.
- When :attr:`copy` is set, a new Tensor is created even when the Tensor
- already matches the desired conversion.
-
- Example::
-
- >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
- >>> tensor.to(torch.float64)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64)
-
- >>> cuda0 = torch.device('cuda:0')
- >>> tensor.to(cuda0)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], device='cuda:0')
-
- >>> tensor.to(cuda0, dtype=torch.float64)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
-
- >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
- >>> tensor.to(other, non_blocking=True)
- tensor([[-0.5044, 0.0005],
- [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
- """
- ...
- def to_dense(self, dtype: Optional[_dtype] = None, *, masked_grad: Optional[_bool] = None) -> Tensor:
- r"""
- to_dense(dtype=None, *, masked_grad=True) -> Tensor
-
- Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
-
- Keyword args:
- {dtype}
- masked_grad (bool, optional): If set to ``True`` (default) and
- :attr:`self` has a sparse layout then the backward of
- :meth:`to_dense` returns ``grad.sparse_mask(self)``.
-
- Example::
-
- >>> s = torch.sparse_coo_tensor(
- ... torch.tensor([[1, 1],
- ... [0, 2]]),
- ... torch.tensor([9, 10]),
- ... size=(3, 3))
- >>> s.to_dense()
- tensor([[ 0, 0, 0],
- [ 9, 0, 10],
- [ 0, 0, 0]])
- """
- ...
- def to_mkldnn(self, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- to_mkldnn() -> Tensor
- Returns a copy of the tensor in ``torch.mkldnn`` layout.
- """
- ...
- def to_padded_tensor(self, padding: _float, output_size: Optional[Sequence[Union[_int, SymInt]]] = None) -> Tensor:
- r"""
- to_padded_tensor(padding, output_size=None) -> Tensor
- See :func:`to_padded_tensor`
- """
- ...
- @overload
- def to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor:
- r"""
- to_sparse(sparseDims) -> Tensor
-
- Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
- :ref:`coordinate format <sparse-coo-docs>`.
-
- Args:
- sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
-
- Example::
-
- >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
- >>> d
- tensor([[ 0, 0, 0],
- [ 9, 0, 10],
- [ 0, 0, 0]])
- >>> d.to_sparse()
- tensor(indices=tensor([[1, 1],
- [0, 2]]),
- values=tensor([ 9, 10]),
- size=(3, 3), nnz=2, layout=torch.sparse_coo)
- >>> d.to_sparse(1)
- tensor(indices=tensor([[1]]),
- values=tensor([[ 9, 0, 10]]),
- size=(3, 3), nnz=1, layout=torch.sparse_coo)
-
- .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
- :noindex:
-
- Returns a sparse tensor with the specified layout and blocksize. If
- the :attr:`self` is strided, the number of dense dimensions could be
- specified, and a hybrid sparse tensor will be created, with
- `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
- dimension.
-
- .. note:: If the :attr:`self` layout and blocksize parameters match
- with the specified layout and blocksize, return
- :attr:`self`. Otherwise, return a sparse tensor copy of
- :attr:`self`.
-
- Args:
-
- layout (:class:`torch.layout`, optional): The desired sparse
- layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
- ``torch.sparse_csc``, ``torch.sparse_bsr``, or
- ``torch.sparse_bsc``. Default: if ``None``,
- ``torch.sparse_coo``.
-
- blocksize (list, tuple, :class:`torch.Size`, optional): Block size
- of the resulting BSR or BSC tensor. For other layouts,
- specifying the block size that is not ``None`` will result in a
- RuntimeError exception. A block size must be a tuple of length
- two such that its items evenly divide the two sparse dimensions.
-
- dense_dim (int, optional): Number of dense dimensions of the
- resulting CSR, CSC, BSR or BSC tensor. This argument should be
- used only if :attr:`self` is a strided tensor, and must be a
- value between 0 and dimension of :attr:`self` tensor minus two.
-
- Example::
-
- >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
- >>> x.to_sparse(layout=torch.sparse_coo)
- tensor(indices=tensor([[0, 2, 2],
- [0, 0, 1]]),
- values=tensor([1, 2, 3]),
- size=(3, 2), nnz=3, layout=torch.sparse_coo)
- >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
- tensor(crow_indices=tensor([0, 1, 1, 2]),
- col_indices=tensor([0, 0]),
- values=tensor([[[1, 0]],
- [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
- >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
- RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
- >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
- RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
-
- >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
- >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
- tensor(crow_indices=tensor([0, 1, 1, 3]),
- col_indices=tensor([0, 0, 1]),
- values=tensor([[1],
- [2],
- [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
- """
- ...
- @overload
- def to_sparse(self, sparse_dim: _int) -> Tensor:
- r"""
- to_sparse(sparseDims) -> Tensor
-
- Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
- :ref:`coordinate format <sparse-coo-docs>`.
-
- Args:
- sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
-
- Example::
-
- >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
- >>> d
- tensor([[ 0, 0, 0],
- [ 9, 0, 10],
- [ 0, 0, 0]])
- >>> d.to_sparse()
- tensor(indices=tensor([[1, 1],
- [0, 2]]),
- values=tensor([ 9, 10]),
- size=(3, 3), nnz=2, layout=torch.sparse_coo)
- >>> d.to_sparse(1)
- tensor(indices=tensor([[1]]),
- values=tensor([[ 9, 0, 10]]),
- size=(3, 3), nnz=1, layout=torch.sparse_coo)
-
- .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
- :noindex:
-
- Returns a sparse tensor with the specified layout and blocksize. If
- the :attr:`self` is strided, the number of dense dimensions could be
- specified, and a hybrid sparse tensor will be created, with
- `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
- dimension.
-
- .. note:: If the :attr:`self` layout and blocksize parameters match
- with the specified layout and blocksize, return
- :attr:`self`. Otherwise, return a sparse tensor copy of
- :attr:`self`.
-
- Args:
-
- layout (:class:`torch.layout`, optional): The desired sparse
- layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
- ``torch.sparse_csc``, ``torch.sparse_bsr``, or
- ``torch.sparse_bsc``. Default: if ``None``,
- ``torch.sparse_coo``.
-
- blocksize (list, tuple, :class:`torch.Size`, optional): Block size
- of the resulting BSR or BSC tensor. For other layouts,
- specifying the block size that is not ``None`` will result in a
- RuntimeError exception. A block size must be a tuple of length
- two such that its items evenly divide the two sparse dimensions.
-
- dense_dim (int, optional): Number of dense dimensions of the
- resulting CSR, CSC, BSR or BSC tensor. This argument should be
- used only if :attr:`self` is a strided tensor, and must be a
- value between 0 and dimension of :attr:`self` tensor minus two.
-
- Example::
-
- >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
- >>> x.to_sparse(layout=torch.sparse_coo)
- tensor(indices=tensor([[0, 2, 2],
- [0, 0, 1]]),
- values=tensor([1, 2, 3]),
- size=(3, 2), nnz=3, layout=torch.sparse_coo)
- >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
- tensor(crow_indices=tensor([0, 1, 1, 2]),
- col_indices=tensor([0, 0]),
- values=tensor([[[1, 0]],
- [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
- >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
- RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
- >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
- RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
-
- >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
- >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
- tensor(crow_indices=tensor([0, 1, 1, 3]),
- col_indices=tensor([0, 0, 1]),
- values=tensor([[1],
- [2],
- [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
- """
- ...
- def to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor:
- r"""
- to_sparse_bsc(blocksize, dense_dim) -> Tensor
-
- Convert a tensor to a block sparse column (BSC) storage format of
- given blocksize. If the :attr:`self` is strided, then the number of
- dense dimensions could be specified, and a hybrid BSC tensor will be
- created, with `dense_dim` dense dimensions and `self.dim() - 2 -
- dense_dim` batch dimension.
-
- Args:
-
- blocksize (list, tuple, :class:`torch.Size`, optional): Block size
- of the resulting BSC tensor. A block size must be a tuple of
- length two such that its items evenly divide the two sparse
- dimensions.
-
- dense_dim (int, optional): Number of dense dimensions of the
- resulting BSC tensor. This argument should be used only if
- :attr:`self` is a strided tensor, and must be a value between 0
- and dimension of :attr:`self` tensor minus two.
-
- Example::
-
- >>> dense = torch.randn(10, 10)
- >>> sparse = dense.to_sparse_csr()
- >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
- >>> sparse_bsc.row_indices()
- tensor([0, 1, 0, 1])
-
- >>> dense = torch.zeros(4, 3, 1)
- >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
- >>> dense.to_sparse_bsc((2, 1), 1)
- tensor(ccol_indices=tensor([0, 1, 2, 3]),
- row_indices=tensor([0, 1, 0]),
- values=tensor([[[[1.]],
-
- [[1.]]],
-
-
- [[[1.]],
-
- [[1.]]],
-
-
- [[[1.]],
-
- [[1.]]]]), size=(4, 3, 1), nnz=3,
- layout=torch.sparse_bsc)
- """
- ...
- def to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor:
- r"""
- to_sparse_bsr(blocksize, dense_dim) -> Tensor
-
- Convert a tensor to a block sparse row (BSR) storage format of given
- blocksize. If the :attr:`self` is strided, then the number of dense
- dimensions could be specified, and a hybrid BSR tensor will be
- created, with `dense_dim` dense dimensions and `self.dim() - 2 -
- dense_dim` batch dimension.
-
- Args:
-
- blocksize (list, tuple, :class:`torch.Size`, optional): Block size
- of the resulting BSR tensor. A block size must be a tuple of
- length two such that its items evenly divide the two sparse
- dimensions.
-
- dense_dim (int, optional): Number of dense dimensions of the
- resulting BSR tensor. This argument should be used only if
- :attr:`self` is a strided tensor, and must be a value between 0
- and dimension of :attr:`self` tensor minus two.
-
- Example::
-
- >>> dense = torch.randn(10, 10)
- >>> sparse = dense.to_sparse_csr()
- >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
- >>> sparse_bsr.col_indices()
- tensor([0, 1, 0, 1])
-
- >>> dense = torch.zeros(4, 3, 1)
- >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
- >>> dense.to_sparse_bsr((2, 1), 1)
- tensor(crow_indices=tensor([0, 2, 3]),
- col_indices=tensor([0, 2, 1]),
- values=tensor([[[[1.]],
-
- [[1.]]],
-
-
- [[[1.]],
-
- [[1.]]],
-
-
- [[[1.]],
-
- [[1.]]]]), size=(4, 3, 1), nnz=3,
- layout=torch.sparse_bsr)
- """
- ...
- def to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor:
- r"""
- to_sparse_csc(dense_dim=None) -> Tensor
-
- Convert a tensor to compressed column storage (CSC) format. Except
- for strided tensors, only works with 2D tensors. If the :attr:`self`
- is strided, then the number of dense dimensions could be specified,
- and a hybrid CSC tensor will be created, with `dense_dim` dense
- dimensions and `self.dim() - 2 - dense_dim` batch dimension.
-
- Args:
-
- dense_dim (int, optional): Number of dense dimensions of the
- resulting CSC tensor. This argument should be used only if
- :attr:`self` is a strided tensor, and must be a value between 0
- and dimension of :attr:`self` tensor minus two.
-
- Example::
-
- >>> dense = torch.randn(5, 5)
- >>> sparse = dense.to_sparse_csc()
- >>> sparse._nnz()
- 25
-
- >>> dense = torch.zeros(3, 3, 1, 1)
- >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
- >>> dense.to_sparse_csc(dense_dim=2)
- tensor(ccol_indices=tensor([0, 1, 2, 3]),
- row_indices=tensor([0, 2, 1]),
- values=tensor([[[1.]],
-
- [[1.]],
-
- [[1.]]]), size=(3, 3, 1, 1), nnz=3,
- layout=torch.sparse_csc)
- """
- ...
- def to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor:
- r"""
- to_sparse_csr(dense_dim=None) -> Tensor
-
- Convert a tensor to compressed row storage format (CSR). Except for
- strided tensors, only works with 2D tensors. If the :attr:`self` is
- strided, then the number of dense dimensions could be specified, and a
- hybrid CSR tensor will be created, with `dense_dim` dense dimensions
- and `self.dim() - 2 - dense_dim` batch dimension.
-
- Args:
-
- dense_dim (int, optional): Number of dense dimensions of the
- resulting CSR tensor. This argument should be used only if
- :attr:`self` is a strided tensor, and must be a value between 0
- and dimension of :attr:`self` tensor minus two.
-
- Example::
-
- >>> dense = torch.randn(5, 5)
- >>> sparse = dense.to_sparse_csr()
- >>> sparse._nnz()
- 25
-
- >>> dense = torch.zeros(3, 3, 1, 1)
- >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
- >>> dense.to_sparse_csr(dense_dim=2)
- tensor(crow_indices=tensor([0, 1, 2, 3]),
- col_indices=tensor([0, 2, 1]),
- values=tensor([[[1.]],
-
- [[1.]],
-
- [[1.]]]), size=(3, 3, 1, 1), nnz=3,
- layout=torch.sparse_csr)
- """
- ...
- def tolist(self) -> List:
- r"""
- tolist() -> list or number
-
- Returns the tensor as a (nested) list. For scalars, a standard
- Python number is returned, just like with :meth:`~Tensor.item`.
- Tensors are automatically moved to the CPU first if necessary.
-
- This operation is not differentiable.
-
- Examples::
-
- >>> a = torch.randn(2, 2)
- >>> a.tolist()
- [[0.012766935862600803, 0.5415473580360413],
- [-0.08909505605697632, 0.7729271650314331]]
- >>> a[0,0].tolist()
- 0.012766935862600803
- """
- ...
- def topk(self, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True) -> torch.return_types.topk:
- r"""
- topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
-
- See :func:`torch.topk`
- """
- ...
- def trace(self) -> Tensor:
- r"""
- trace() -> Tensor
-
- See :func:`torch.trace`
- """
- ...
- @overload
- def transpose(self, dim0: _int, dim1: _int) -> Tensor:
- r"""
- transpose(dim0, dim1) -> Tensor
-
- See :func:`torch.transpose`
- """
- ...
- @overload
- def transpose(self, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor:
- r"""
- transpose(dim0, dim1) -> Tensor
-
- See :func:`torch.transpose`
- """
- ...
- def transpose_(self, dim0: _int, dim1: _int) -> Tensor:
- r"""
- transpose_(dim0, dim1) -> Tensor
-
- In-place version of :meth:`~Tensor.transpose`
- """
- ...
- def triangular_solve(self, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False) -> torch.return_types.triangular_solve:
- r"""
- triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
-
- See :func:`torch.triangular_solve`
- """
- ...
- def tril(self, diagonal: _int = 0) -> Tensor:
- r"""
- tril(diagonal=0) -> Tensor
-
- See :func:`torch.tril`
- """
- ...
- def tril_(self, diagonal: _int = 0) -> Tensor:
- r"""
- tril_(diagonal=0) -> Tensor
-
- In-place version of :meth:`~Tensor.tril`
- """
- ...
- def triu(self, diagonal: _int = 0) -> Tensor:
- r"""
- triu(diagonal=0) -> Tensor
-
- See :func:`torch.triu`
- """
- ...
- def triu_(self, diagonal: _int = 0) -> Tensor:
- r"""
- triu_(diagonal=0) -> Tensor
-
- In-place version of :meth:`~Tensor.triu`
- """
- ...
- def true_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- true_divide(value) -> Tensor
-
- See :func:`torch.true_divide`
- """
- ...
- def true_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor:
- r"""
- true_divide_(value) -> Tensor
-
- In-place version of :meth:`~Tensor.true_divide`
- """
- ...
- def trunc(self) -> Tensor:
- r"""
- trunc() -> Tensor
-
- See :func:`torch.trunc`
- """
- ...
- def trunc_(self) -> Tensor:
- r"""
- trunc_() -> Tensor
-
- In-place version of :meth:`~Tensor.trunc`
- """
- ...
- @overload
- def type(self, dtype: None = None, non_blocking: _bool = False) -> str:
- r"""
- type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
- Returns the type if `dtype` is not provided, else casts this object to
- the specified type.
-
- If this is already of the correct type, no copy is performed and the
- original object is returned.
-
- Args:
- dtype (dtype or string): The desired type
- non_blocking (bool): If ``True``, and the source is in pinned memory
- and destination is on the GPU or vice versa, the copy is performed
- asynchronously with respect to the host. Otherwise, the argument
- has no effect.
- **kwargs: For compatibility, may contain the key ``async`` in place of
- the ``non_blocking`` argument. The ``async`` arg is deprecated.
- """
- ...
- @overload
- def type(self, dtype: Union[str, _dtype], non_blocking: _bool = False) -> Tensor:
- r"""
- type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
- Returns the type if `dtype` is not provided, else casts this object to
- the specified type.
-
- If this is already of the correct type, no copy is performed and the
- original object is returned.
-
- Args:
- dtype (dtype or string): The desired type
- non_blocking (bool): If ``True``, and the source is in pinned memory
- and destination is on the GPU or vice versa, the copy is performed
- asynchronously with respect to the host. Otherwise, the argument
- has no effect.
- **kwargs: For compatibility, may contain the key ``async`` in place of
- the ``non_blocking`` argument. The ``async`` arg is deprecated.
- """
- ...
- def type_as(self, other: Tensor) -> Tensor:
- r"""
- type_as(tensor) -> Tensor
-
- Returns this tensor cast to the type of the given tensor.
-
- This is a no-op if the tensor is already of the correct type. This is
- equivalent to ``self.type(tensor.type())``
-
- Args:
- tensor (Tensor): the tensor which has the desired type
- """
- ...
- @overload
- def unbind(self, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- unbind(dim=0) -> seq
-
- See :func:`torch.unbind`
- """
- ...
- @overload
- def unbind(self, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]:
- r"""
- unbind(dim=0) -> seq
-
- See :func:`torch.unbind`
- """
- ...
- @overload
- def unflatten(self, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ...
- @overload
- def unflatten(self, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: ...
- def unfold(self, dimension: _int, size: _int, step: _int) -> Tensor:
- r"""
- unfold(dimension, size, step) -> Tensor
-
- Returns a view of the original tensor which contains all slices of size :attr:`size` from
- :attr:`self` tensor in the dimension :attr:`dimension`.
-
- Step between two slices is given by :attr:`step`.
-
- If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
- dimension :attr:`dimension` in the returned tensor will be
- `(sizedim - size) / step + 1`.
-
- An additional dimension of size :attr:`size` is appended in the returned tensor.
-
- Args:
- dimension (int): dimension in which unfolding happens
- size (int): the size of each slice that is unfolded
- step (int): the step between each slice
-
- Example::
-
- >>> x = torch.arange(1., 8)
- >>> x
- tensor([ 1., 2., 3., 4., 5., 6., 7.])
- >>> x.unfold(0, 2, 1)
- tensor([[ 1., 2.],
- [ 2., 3.],
- [ 3., 4.],
- [ 4., 5.],
- [ 5., 6.],
- [ 6., 7.]])
- >>> x.unfold(0, 2, 2)
- tensor([[ 1., 2.],
- [ 3., 4.],
- [ 5., 6.]])
- """
- ...
- def uniform_(self, from_: _float = 0, to: _float = 1, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- uniform_(from=0, to=1, *, generator=None) -> Tensor
-
- Fills :attr:`self` tensor with numbers sampled from the continuous uniform
- distribution:
-
- .. math::
- f(x) = \dfrac{1}{\text{to} - \text{from}}
- """
- ...
- def unsafe_chunk(self, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- unsafe_chunk(chunks, dim=0) -> List of Tensors
-
- See :func:`torch.unsafe_chunk`
- """
- ...
- def unsafe_split(self, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- unsafe_split(split_size, dim=0) -> List of Tensors
-
- See :func:`torch.unsafe_split`
- """
- ...
- def unsafe_split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
- def unsqueeze(self, dim: _int) -> Tensor:
- r"""
- unsqueeze(dim) -> Tensor
-
- See :func:`torch.unsqueeze`
- """
- ...
- def unsqueeze_(self, dim: _int) -> Tensor:
- r"""
- unsqueeze_(dim) -> Tensor
-
- In-place version of :meth:`~Tensor.unsqueeze`
- """
- ...
- def values(self) -> Tensor:
- r"""
- values() -> Tensor
-
- Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
-
- .. warning::
- Throws an error if :attr:`self` is not a sparse COO tensor.
-
- See also :meth:`Tensor.indices`.
-
- .. note::
- This method can only be called on a coalesced sparse tensor. See
- :meth:`Tensor.coalesce` for details.
- """
- ...
- @overload
- def var(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
- r"""
- var(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.var`
- """
- ...
- @overload
- def var(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
- r"""
- var(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.var`
- """
- ...
- @overload
- def var(self, unbiased: _bool = True) -> Tensor:
- r"""
- var(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.var`
- """
- ...
- @overload
- def var(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor:
- r"""
- var(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.var`
- """
- ...
- @overload
- def var(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor:
- r"""
- var(dim=None, *, correction=1, keepdim=False) -> Tensor
-
- See :func:`torch.var`
- """
- ...
- def vdot(self, other: Tensor) -> Tensor:
- r"""
- vdot(other) -> Tensor
-
- See :func:`torch.vdot`
- """
- ...
- @overload
- def view(self, dtype: _dtype) -> Tensor:
- r"""
- view(*shape) -> Tensor
-
- Returns a new tensor with the same data as the :attr:`self` tensor but of a
- different :attr:`shape`.
-
- The returned tensor shares the same data and must have the same number
- of elements, but may have a different size. For a tensor to be viewed, the new
- view size must be compatible with its original size and stride, i.e., each new
- view dimension must either be a subspace of an original dimension, or only span
- across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
- contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
-
- .. math::
-
- \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
-
- Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
- without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
- :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
- returns a view if the shapes are compatible, and copies (equivalent to calling
- :meth:`contiguous`) otherwise.
-
- Args:
- shape (torch.Size or int...): the desired size
-
- Example::
-
- >>> x = torch.randn(4, 4)
- >>> x.size()
- torch.Size([4, 4])
- >>> y = x.view(16)
- >>> y.size()
- torch.Size([16])
- >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
- >>> z.size()
- torch.Size([2, 8])
-
- >>> a = torch.randn(1, 2, 3, 4)
- >>> a.size()
- torch.Size([1, 2, 3, 4])
- >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
- >>> b.size()
- torch.Size([1, 3, 2, 4])
- >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
- >>> c.size()
- torch.Size([1, 3, 2, 4])
- >>> torch.equal(b, c)
- False
-
-
- .. method:: view(dtype) -> Tensor
- :noindex:
-
- Returns a new tensor with the same data as the :attr:`self` tensor but of a
- different :attr:`dtype`.
-
- If the element size of :attr:`dtype` is different than that of ``self.dtype``,
- then the size of the last dimension of the output will be scaled
- proportionally. For instance, if :attr:`dtype` element size is twice that of
- ``self.dtype``, then each pair of elements in the last dimension of
- :attr:`self` will be combined, and the size of the last dimension of the output
- will be half that of :attr:`self`. If :attr:`dtype` element size is half that
- of ``self.dtype``, then each element in the last dimension of :attr:`self` will
- be split in two, and the size of the last dimension of the output will be
- double that of :attr:`self`. For this to be possible, the following conditions
- must be true:
-
- * ``self.dim()`` must be greater than 0.
- * ``self.stride(-1)`` must be 1.
-
- Additionally, if the element size of :attr:`dtype` is greater than that of
- ``self.dtype``, the following conditions must be true as well:
-
- * ``self.size(-1)`` must be divisible by the ratio between the element
- sizes of the dtypes.
- * ``self.storage_offset()`` must be divisible by the ratio between the
- element sizes of the dtypes.
- * The strides of all dimensions, except the last dimension, must be
- divisible by the ratio between the element sizes of the dtypes.
-
- If any of the above conditions are not met, an error is thrown.
-
- .. warning::
-
- This overload is not supported by TorchScript, and using it in a Torchscript
- program will cause undefined behavior.
-
-
- Args:
- dtype (:class:`torch.dtype`): the desired dtype
-
- Example::
-
- >>> x = torch.randn(4, 4)
- >>> x
- tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
- [-0.1520, 0.7472, 0.5617, -0.8649],
- [-2.4724, -0.0334, -0.2976, -0.8499],
- [-0.2109, 1.9913, -0.9607, -0.6123]])
- >>> x.dtype
- torch.float32
-
- >>> y = x.view(torch.int32)
- >>> y
- tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
- [-1105482831, 1061112040, 1057999968, -1084397505],
- [-1071760287, -1123489973, -1097310419, -1084649136],
- [-1101533110, 1073668768, -1082790149, -1088634448]],
- dtype=torch.int32)
- >>> y[0, 0] = 1000000000
- >>> x
- tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
- [-0.1520, 0.7472, 0.5617, -0.8649],
- [-2.4724, -0.0334, -0.2976, -0.8499],
- [-0.2109, 1.9913, -0.9607, -0.6123]])
-
- >>> x.view(torch.cfloat)
- tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
- [-0.1520+0.7472j, 0.5617-0.8649j],
- [-2.4724-0.0334j, -0.2976-0.8499j],
- [-0.2109+1.9913j, -0.9607-0.6123j]])
- >>> x.view(torch.cfloat).size()
- torch.Size([4, 2])
-
- >>> x.view(torch.uint8)
- tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
- 8, 191],
- [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
- 93, 191],
- [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
- 89, 191],
- [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
- 28, 191]], dtype=torch.uint8)
- >>> x.view(torch.uint8).size()
- torch.Size([4, 16])
- """
- ...
- @overload
- def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- view(*shape) -> Tensor
-
- Returns a new tensor with the same data as the :attr:`self` tensor but of a
- different :attr:`shape`.
-
- The returned tensor shares the same data and must have the same number
- of elements, but may have a different size. For a tensor to be viewed, the new
- view size must be compatible with its original size and stride, i.e., each new
- view dimension must either be a subspace of an original dimension, or only span
- across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
- contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
-
- .. math::
-
- \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
-
- Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
- without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
- :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
- returns a view if the shapes are compatible, and copies (equivalent to calling
- :meth:`contiguous`) otherwise.
-
- Args:
- shape (torch.Size or int...): the desired size
-
- Example::
-
- >>> x = torch.randn(4, 4)
- >>> x.size()
- torch.Size([4, 4])
- >>> y = x.view(16)
- >>> y.size()
- torch.Size([16])
- >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
- >>> z.size()
- torch.Size([2, 8])
-
- >>> a = torch.randn(1, 2, 3, 4)
- >>> a.size()
- torch.Size([1, 2, 3, 4])
- >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
- >>> b.size()
- torch.Size([1, 3, 2, 4])
- >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
- >>> c.size()
- torch.Size([1, 3, 2, 4])
- >>> torch.equal(b, c)
- False
-
-
- .. method:: view(dtype) -> Tensor
- :noindex:
-
- Returns a new tensor with the same data as the :attr:`self` tensor but of a
- different :attr:`dtype`.
-
- If the element size of :attr:`dtype` is different than that of ``self.dtype``,
- then the size of the last dimension of the output will be scaled
- proportionally. For instance, if :attr:`dtype` element size is twice that of
- ``self.dtype``, then each pair of elements in the last dimension of
- :attr:`self` will be combined, and the size of the last dimension of the output
- will be half that of :attr:`self`. If :attr:`dtype` element size is half that
- of ``self.dtype``, then each element in the last dimension of :attr:`self` will
- be split in two, and the size of the last dimension of the output will be
- double that of :attr:`self`. For this to be possible, the following conditions
- must be true:
-
- * ``self.dim()`` must be greater than 0.
- * ``self.stride(-1)`` must be 1.
-
- Additionally, if the element size of :attr:`dtype` is greater than that of
- ``self.dtype``, the following conditions must be true as well:
-
- * ``self.size(-1)`` must be divisible by the ratio between the element
- sizes of the dtypes.
- * ``self.storage_offset()`` must be divisible by the ratio between the
- element sizes of the dtypes.
- * The strides of all dimensions, except the last dimension, must be
- divisible by the ratio between the element sizes of the dtypes.
-
- If any of the above conditions are not met, an error is thrown.
-
- .. warning::
-
- This overload is not supported by TorchScript, and using it in a Torchscript
- program will cause undefined behavior.
-
-
- Args:
- dtype (:class:`torch.dtype`): the desired dtype
-
- Example::
-
- >>> x = torch.randn(4, 4)
- >>> x
- tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
- [-0.1520, 0.7472, 0.5617, -0.8649],
- [-2.4724, -0.0334, -0.2976, -0.8499],
- [-0.2109, 1.9913, -0.9607, -0.6123]])
- >>> x.dtype
- torch.float32
-
- >>> y = x.view(torch.int32)
- >>> y
- tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
- [-1105482831, 1061112040, 1057999968, -1084397505],
- [-1071760287, -1123489973, -1097310419, -1084649136],
- [-1101533110, 1073668768, -1082790149, -1088634448]],
- dtype=torch.int32)
- >>> y[0, 0] = 1000000000
- >>> x
- tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
- [-0.1520, 0.7472, 0.5617, -0.8649],
- [-2.4724, -0.0334, -0.2976, -0.8499],
- [-0.2109, 1.9913, -0.9607, -0.6123]])
-
- >>> x.view(torch.cfloat)
- tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
- [-0.1520+0.7472j, 0.5617-0.8649j],
- [-2.4724-0.0334j, -0.2976-0.8499j],
- [-0.2109+1.9913j, -0.9607-0.6123j]])
- >>> x.view(torch.cfloat).size()
- torch.Size([4, 2])
-
- >>> x.view(torch.uint8)
- tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
- 8, 191],
- [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
- 93, 191],
- [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
- 89, 191],
- [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
- 28, 191]], dtype=torch.uint8)
- >>> x.view(torch.uint8).size()
- torch.Size([4, 16])
- """
- ...
- @overload
- def view(self, *size: _int) -> Tensor:
- r"""
- view(*shape) -> Tensor
-
- Returns a new tensor with the same data as the :attr:`self` tensor but of a
- different :attr:`shape`.
-
- The returned tensor shares the same data and must have the same number
- of elements, but may have a different size. For a tensor to be viewed, the new
- view size must be compatible with its original size and stride, i.e., each new
- view dimension must either be a subspace of an original dimension, or only span
- across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
- contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
-
- .. math::
-
- \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
-
- Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
- without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
- :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
- returns a view if the shapes are compatible, and copies (equivalent to calling
- :meth:`contiguous`) otherwise.
-
- Args:
- shape (torch.Size or int...): the desired size
-
- Example::
-
- >>> x = torch.randn(4, 4)
- >>> x.size()
- torch.Size([4, 4])
- >>> y = x.view(16)
- >>> y.size()
- torch.Size([16])
- >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
- >>> z.size()
- torch.Size([2, 8])
-
- >>> a = torch.randn(1, 2, 3, 4)
- >>> a.size()
- torch.Size([1, 2, 3, 4])
- >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
- >>> b.size()
- torch.Size([1, 3, 2, 4])
- >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
- >>> c.size()
- torch.Size([1, 3, 2, 4])
- >>> torch.equal(b, c)
- False
-
-
- .. method:: view(dtype) -> Tensor
- :noindex:
-
- Returns a new tensor with the same data as the :attr:`self` tensor but of a
- different :attr:`dtype`.
-
- If the element size of :attr:`dtype` is different than that of ``self.dtype``,
- then the size of the last dimension of the output will be scaled
- proportionally. For instance, if :attr:`dtype` element size is twice that of
- ``self.dtype``, then each pair of elements in the last dimension of
- :attr:`self` will be combined, and the size of the last dimension of the output
- will be half that of :attr:`self`. If :attr:`dtype` element size is half that
- of ``self.dtype``, then each element in the last dimension of :attr:`self` will
- be split in two, and the size of the last dimension of the output will be
- double that of :attr:`self`. For this to be possible, the following conditions
- must be true:
-
- * ``self.dim()`` must be greater than 0.
- * ``self.stride(-1)`` must be 1.
-
- Additionally, if the element size of :attr:`dtype` is greater than that of
- ``self.dtype``, the following conditions must be true as well:
-
- * ``self.size(-1)`` must be divisible by the ratio between the element
- sizes of the dtypes.
- * ``self.storage_offset()`` must be divisible by the ratio between the
- element sizes of the dtypes.
- * The strides of all dimensions, except the last dimension, must be
- divisible by the ratio between the element sizes of the dtypes.
-
- If any of the above conditions are not met, an error is thrown.
-
- .. warning::
-
- This overload is not supported by TorchScript, and using it in a Torchscript
- program will cause undefined behavior.
-
-
- Args:
- dtype (:class:`torch.dtype`): the desired dtype
-
- Example::
-
- >>> x = torch.randn(4, 4)
- >>> x
- tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
- [-0.1520, 0.7472, 0.5617, -0.8649],
- [-2.4724, -0.0334, -0.2976, -0.8499],
- [-0.2109, 1.9913, -0.9607, -0.6123]])
- >>> x.dtype
- torch.float32
-
- >>> y = x.view(torch.int32)
- >>> y
- tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
- [-1105482831, 1061112040, 1057999968, -1084397505],
- [-1071760287, -1123489973, -1097310419, -1084649136],
- [-1101533110, 1073668768, -1082790149, -1088634448]],
- dtype=torch.int32)
- >>> y[0, 0] = 1000000000
- >>> x
- tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
- [-0.1520, 0.7472, 0.5617, -0.8649],
- [-2.4724, -0.0334, -0.2976, -0.8499],
- [-0.2109, 1.9913, -0.9607, -0.6123]])
-
- >>> x.view(torch.cfloat)
- tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
- [-0.1520+0.7472j, 0.5617-0.8649j],
- [-2.4724-0.0334j, -0.2976-0.8499j],
- [-0.2109+1.9913j, -0.9607-0.6123j]])
- >>> x.view(torch.cfloat).size()
- torch.Size([4, 2])
-
- >>> x.view(torch.uint8)
- tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
- 8, 191],
- [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
- 93, 191],
- [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
- 89, 191],
- [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
- 28, 191]], dtype=torch.uint8)
- >>> x.view(torch.uint8).size()
- torch.Size([4, 16])
- """
- ...
    # Signature-only stub; the implementation lives in the C extension.
    def view_as(self, other: Tensor) -> Tensor:
        r"""
        view_as(other) -> Tensor

        View this tensor as the same size as :attr:`other`.
        ``self.view_as(other)`` is equivalent to ``self.view(other.size())``.

        Please see :meth:`~Tensor.view` for more information about ``view``.

        Args:
            other (:class:`torch.Tensor`): The result tensor has the same size
                as :attr:`other`.
        """
        ...
    # Overload: split into `sections` chunks along the first dimension.
    @overload
    def vsplit(self, sections: _int) -> Tuple[Tensor, ...]:
        r"""
        vsplit(split_size_or_sections) -> List of Tensors

        See :func:`torch.vsplit`
        """
        ...
    # Overload: split at the given indices (a sequence of ints).
    @overload
    def vsplit(self, indices: _size) -> Tuple[Tensor, ...]:
        r"""
        vsplit(split_size_or_sections) -> List of Tensors

        See :func:`torch.vsplit`
        """
        ...
    # Overload: split indices passed as separate positional arguments.
    @overload
    def vsplit(self, *indices: _int) -> Tuple[Tensor, ...]:
        r"""
        vsplit(split_size_or_sections) -> List of Tensors

        See :func:`torch.vsplit`
        """
        ...
    # Overload: `other` is a Tensor.
    @overload
    def where(self, condition: Tensor, other: Tensor) -> Tensor:
        r"""
        where(condition, y) -> Tensor

        ``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
        See :func:`torch.where`
        """
        ...
    # Overload: `other` is a Python scalar (number or complex).
    @overload
    def where(self, condition: Tensor, other: Union[Number, _complex]) -> Tensor:
        r"""
        where(condition, y) -> Tensor

        ``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
        See :func:`torch.where`
        """
        ...
    # Overload: `other` is a Tensor.
    @overload
    def xlogy(self, other: Tensor) -> Tensor:
        r"""
        xlogy(other) -> Tensor

        See :func:`torch.xlogy`
        """
        ...
    # Overload: `other` is a Python scalar (number or complex).
    @overload
    def xlogy(self, other: Union[Number, _complex]) -> Tensor:
        r"""
        xlogy(other) -> Tensor

        See :func:`torch.xlogy`
        """
        ...
    # In-place variant of xlogy; same overload pair as the out-of-place form.
    @overload
    def xlogy_(self, other: Tensor) -> Tensor:
        r"""
        xlogy_(other) -> Tensor

        In-place version of :meth:`~Tensor.xlogy`
        """
        ...
    @overload
    def xlogy_(self, other: Union[Number, _complex]) -> Tensor:
        r"""
        xlogy_(other) -> Tensor

        In-place version of :meth:`~Tensor.xlogy`
        """
        ...
    # Device-transfer stub; mirrors Tensor.cuda()/Tensor.cpu() in shape.
    def xpu(self, device: Optional[Union[_device, _int, str]] = None, non_blocking: _bool = False, memory_format: torch.memory_format = torch.preserve_format) -> Tensor:
        r"""
        xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

        Returns a copy of this object in XPU memory.

        If this object is already in XPU memory and on the correct device,
        then no copy is performed and the original object is returned.

        Args:
            device (:class:`torch.device`): The destination XPU device.
                Defaults to the current XPU device.
            non_blocking (bool): If ``True`` and the source is in pinned memory,
                the copy will be asynchronous with respect to the host.
                Otherwise, the argument has no effect. Default: ``False``.
            memory_format (:class:`torch.memory_format`, optional): the desired memory format of
                returned Tensor. Default: ``torch.preserve_format``.
        """
        ...
    # In-place op: trailing underscore per PyTorch convention; returns self.
    def zero_(self) -> Tensor:
        r"""
        zero_() -> Tensor

        Fills :attr:`self` tensor with zeros.
        """
        ...
_TensorBase = TensorBase  # Backward-compatible alias for the pre-rename class name.
# Defined in torch/csrc/multiprocessing/init.cpp
def _multiprocessing_init() -> None: ...
# Defined in torch/csrc/Module.cpp
# Private accelerator-hook bindings; operate on the active accelerator backend.
def _accelerator_hooks_device_count() -> _int: ...
def _accelerator_hooks_set_current_device(device_index: _int) -> None: ...
def _accelerator_hooks_get_current_device() -> _int: ...
def _accelerator_hooks_exchange_device(device_index: _int) -> _int: ...
def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int: ...
def _get_accelerator(check: _bool = False) -> _device: ...
# Defined in torch/csrc/mtia/Module.cpp
def _mtia_init() -> None: ...
def _mtia_isBuilt() -> _bool: ...
def _mtia_isInBadFork() -> _bool: ...
def _mtia_deviceSynchronize() -> None: ...
def _mtia_getCurrentStream(device: _int) -> Stream: ...
def _mtia_setCurrentStream(stream: Stream) -> None: ...
def _mtia_getDefaultStream(device: _int) -> Stream: ...
# Defined in torch/csrc/mps/Module.cpp
def _mps_deviceSynchronize() -> None: ...
def _mps_get_default_generator() -> Generator: ...
def _mps_emptyCache() -> None: ...
def _mps_setMemoryFraction(fraction: _float) -> None: ...
def _mps_currentAllocatedMemory() -> _int: ...
def _mps_driverAllocatedMemory() -> _int: ...
def _mps_is_available() -> _bool: ...
def _mps_is_on_macos_or_newer(major: _int, minor: _int) -> _bool: ...
def _mps_profilerStartTrace(mode: str, wait_until_completed: _bool) -> None: ...
def _mps_profilerStopTrace() -> None: ...
# MPS events are referenced by integer ids acquired/released through these calls.
def _mps_acquireEvent(enable_timing: _bool) -> _int: ...
def _mps_releaseEvent(event_id: _int) -> None: ...
def _mps_recordEvent(event_id: _int) -> None: ...
def _mps_waitForEvent(event_id: _int) -> None: ...
def _mps_synchronizeEvent(event_id: _int) -> None: ...
def _mps_queryEvent(event_id: _int) -> _bool: ...
def _mps_elapsedTimeOfEvents(start_event_id: _int, end_event_id: _int) -> _float: ...
# Defined in torch/csrc/cuda/Module.cpp
def _cuda_getCurrentStream(device: _int) -> Tuple: ...
def _cuda_getCurrentRawStream(device: _int) -> _int: ...
def _cuda_getDefaultStream(device: _int) -> Tuple: ...
def _cuda_getCurrentBlasHandle() -> _int: ...
def _cuda_clearCublasWorkspaces() -> None: ...
def _cuda_setDevice(device: _int) -> None: ...
def _cuda_exchangeDevice(device: _int) -> _int: ...
def _cuda_maybeExchangeDevice(device: _int) -> _int: ...
def _cuda_getDevice() -> _int: ...
def _cuda_getDeviceCount() -> _int: ...
def _cuda_set_sync_debug_mode(warn_level: Union[_int, str]) -> None: ...
def _cuda_get_sync_debug_mode() -> _int: ...
def _cuda_sleep(cycles: _int) -> None: ...
def _cuda_synchronize() -> None: ...
def _cuda_ipc_collect() -> None: ...
def _cuda_getArchFlags() -> Optional[str]: ...
def _cuda_init() -> None: ...
def _cuda_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ...
def _cuda_getCompiledVersion() -> _int: ...
def _cuda_cudaHostAllocator() -> _int: ...
# Raw caching-allocator entry points; pointers are passed as plain ints.
def _cuda_cudaCachingAllocator_raw_alloc(size: _int, cuda_stream: _int) -> _int: ...
def _cuda_cudaCachingAllocator_raw_delete(ptr: _int) -> None: ...
def _cuda_cudaCachingAllocator_set_allocator_settings(env: str) -> None: ...
# Memory-pool management; a pool is identified by a (id, id) tuple.
def _cuda_beginAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ...
def _cuda_endAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ...
def _cuda_releasePool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ...
def _cuda_checkPoolLiveAllocations(device: _int, mempool_id: Tuple[_int, _int], expected_live_allocations: Set) -> _bool: ...
def _cuda_setCheckpointPoolState(device: _int, state: _cuda_CUDAAllocator_AllocatorState, stale_storages: List[_int], storages_to_add_deleters_to: List[_int]) -> None: ...
def _cuda_setMemoryFraction(fraction: _float, device: _int) -> None: ...
def _cuda_emptyCache() -> None: ...
def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ...
def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ...
def _cuda_resetPeakMemoryStats(device: _int) -> None: ...
def _cuda_memorySnapshot() -> Dict[str, Any]: ...
# Memory-history recording for the CUDA caching allocator (snapshot tooling).
def _cuda_record_memory_history_legacy(
    enabled: _bool,
    record_context: _bool,
    record_context_cpp: _bool,
    alloc_trace_max_entries: _int,
    alloc_trace_record_context: _bool,
) -> None: ...
def _cuda_record_memory_history(
    enabled: Optional[str],
    context: Optional[str],
    stacks: str,
    # Annotation added: maximum number of history entries to retain.
    max_entries: _int
) -> None: ...
def _cuda_isHistoryEnabled() -> _bool: ...
def _cuda_getAllocatorBackend() -> str: ...
# Opaque handle to a checkpointed caching-allocator state.
class _cuda_CUDAAllocator_AllocatorState:
    pass
def _cuda_getCheckpointState(device: _int, mempool: Tuple[_int, _int]) -> _cuda_CUDAAllocator_AllocatorState: ...
# Cached-tensor registry used by CUDA graph tooling.
def _set_cached_tensors_enabled(enabled: _bool) -> None: ...
def _add_cached_tensor(t: Tensor) -> None: ...
def _remove_cached_tensor(t: Tensor) -> None: ...
def _tensors_data_ptrs_at_indices_equal(tensors: List[Tensor], ptrs: List[Optional[_int]], indices: List[_int]) -> _bool: ...
def _construct_CUDA_Tensor_From_Storage_And_Metadata(metadata: dict, storage: Storage) -> Tensor: ...
# Storage helpers keyed on raw storage pointers passed as ints.
def _storage_Use_Count(storage_ptr: _int) -> _int: ...
def _set_storage_access_error_msg(t: Tensor, s: str) -> None: ...
def _free_And_Remove_DeleterFn(storage_ptr: _int) -> None: ...
def _has_Standard_Deleter(storage_ptr: _int) -> _bool: ...
# Opaque handle to a pluggable CUDA allocator.
class _cuda_CUDAAllocator: ...
def _cuda_customAllocator(alloc_fn: _int, free_fn: _int) -> _cuda_CUDAAllocator: ...
def _cuda_changeCurrentAllocator(allocator: _cuda_CUDAAllocator) -> None: ...
def _cuda_getAllocator() -> _cuda_CUDAAllocator: ...
def _cuda_lock_mutex() -> None: ...
def _cuda_unlock_mutex() -> None: ...
def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ...
# JIT (jiterator) kernel compilation and benchmark/NCCL version bindings.
def _cuda_jiterator_compile_and_launch_kernel(
    code_string: str,
    kernel_name: str,
    return_by_ref: _bool,
    num_outputs: _int,
    tensors: Tuple,
    kwargs: Dict[str, Union[_int, _float, _bool]],
) -> Tensor: ...
def _cuda_get_cudnn_benchmark_limit() -> _int: ...
def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ...
def _cuda_get_conv_benchmark_empty_cache() -> _bool: ...
def _cudnn_set_conv_benchmark_empty_cache(enable: _bool) -> None: ...
def _nccl_version() -> _int: ...
# Fixed PEP 8 violation: stray space before the annotation colon ("-> bytes :").
def _nccl_version_suffix() -> bytes: ...
def _nccl_unique_id() -> bytes: ...
def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ...
# NCCL collective bindings; `streams`/`comms` default to the current ones when None.
# NOTE(review): per-communicator semantics live in the C++ binding — see
# torch/csrc/cuda/nccl.cpp for authoritative argument handling.
def _nccl_reduce(
    input: Sequence[Tensor],
    output: Tensor,
    root: _int,
    op: _int,
    streams: Optional[Sequence[_CudaStreamBase]],
    comms: Optional[Sequence[object]],
) -> None: ...
def _nccl_all_reduce(
    input: Sequence[Tensor],
    output: Sequence[Tensor],
    op: _int,
    streams: Optional[Sequence[_CudaStreamBase]],
    comms: Optional[Sequence[object]],
) -> None: ...
def _nccl_broadcast(
    input: Sequence[Tensor],
    root: _int,
    streams: Optional[Sequence[_CudaStreamBase]],
    comms: Optional[Sequence[object]],
) -> None: ...
def _nccl_all_gather(
    input: Sequence[Tensor],
    output: Sequence[Tensor],
    streams: Optional[Sequence[_CudaStreamBase]],
    comms: Optional[Sequence[object]],
) -> None: ...
def _nccl_reduce_scatter(
    input: Sequence[Tensor],
    output: Sequence[Tensor],
    op: _int,
    streams: Optional[Sequence[_CudaStreamBase]],
    comms: Optional[Sequence[object]],
) -> None: ...
def _rocm_is_backward_pass() -> _bool: ...
# TunableOp bindings: runtime auto-tuning of GEMM-like ops, with results
# optionally persisted to / loaded from a CSV file.
def _cuda_tunableop_enable(val: _bool) -> None: ...
def _cuda_tunableop_is_enabled() -> _bool: ...
def _cuda_tunableop_tuning_enable(val: _bool) -> None: ...
def _cuda_tunableop_tuning_is_enabled() -> _bool: ...
def _cuda_tunableop_set_max_tuning_duration(duration: _int) -> None: ...
def _cuda_tunableop_get_max_tuning_duration() -> _int: ...
def _cuda_tunableop_set_max_tuning_iterations(iterations: _int) -> None: ...
def _cuda_tunableop_get_max_tuning_iterations() -> _int: ...
def _cuda_tunableop_set_filename(filename: str, insert_device_ordinal: Optional[_bool]) -> None: ...
def _cuda_tunableop_get_filename() -> str: ...
def _cuda_tunableop_write_file(filename: Optional[str]) -> _bool: ...
def _cuda_tunableop_read_file(filename: Optional[str]) -> _bool: ...
def _cuda_tunableop_write_file_on_exit(val: _bool) -> None: ...
def _cuda_tunableop_get_results() -> Tuple[str, str, str, _float]: ...
def _cuda_tunableop_get_validators() -> Tuple[str, str]: ...
# Mirror of cudaDeviceProp fields exposed to Python.
class _CudaDeviceProperties:
    name: str
    major: _int  # compute capability major version
    minor: _int  # compute capability minor version
    multi_processor_count: _int
    total_memory: _int
    is_integrated: _int
    is_multi_gpu_board: _int
    max_threads_per_multi_processor: _int
    gcnArchName: str  # AMD/ROCm architecture name (empty concept on NVIDIA builds — TODO confirm)
# Functions related to SDPA
# Parameter bundle used to query scaled-dot-product-attention backend eligibility.
class _SDPAParams:
    query: Tensor
    key: Tensor
    value: Tensor
    attn_mask: Optional[Tensor]
    dropout: _float
    is_causal: _bool
    def __init__(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        attn_mask: Optional[Tensor],
        dropout: _float,
        is_causal: _bool) -> None: ...
# Available SDPA backends; values match the C++ enum.
class _SDPBackend(Enum):
    ERROR = -1
    MATH = 0
    FLASH_ATTENTION = 1
    EFFICIENT_ATTENTION = 2
    CUDNN_ATTENTION = 3
# `debug=True` makes the C++ side log why a backend was rejected.
def _can_use_flash_attention(params: _SDPAParams, debug: _bool) -> _bool: ...
def _can_use_mem_efficient_attention(params: _SDPAParams, debug: _bool) -> _bool: ...
# Defined in torch/csrc/cuda/python_comm.cpp
# Multi-GPU communication primitives backing torch.cuda.comm.
def _broadcast(tensor: Tensor, devices: List[_int]) -> List[Tensor]: ...
def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: ...
def _broadcast_coalesced(
    tensors: List[Tensor],
    devices: List[_int],
    buffer_size: _int,
) -> List[List[Tensor]]: ...
def _scatter(
    tensor: Tensor,
    devices: List[_int],
    chunk_sizes: Optional[List[_int]],
    dim: _int,
    streams: Optional[List[Stream]],
) -> List[Tensor]: ...
def _scatter_out(
    tensor: Tensor,
    out_tensors: List[Tensor],
    dim: _int,
    streams: Optional[List[Stream]],
) -> List[Tensor]: ...
def _gather(
    tensors: List[Tensor],
    dim: _int,
    destination_index: Optional[_int],
) -> Tensor: ...
def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ...
# Defined in torch/csrc/cuda/Stream.cpp
# Low-level binding for a CUDA stream.
class _CudaStreamBase(Stream):
    stream_id: _int
    device_index: _int
    device_type: _int
    device: _device
    cuda_stream: _int  # underlying stream handle exposed as an integer
    priority: _int
    def __new__(
        # Fixed: the implicit first argument of __new__ is the class, so name it
        # `cls` (consistent with _CudaEventBase.__new__ below); it was `self`.
        cls,
        priority: _int = 0,
        stream_id: _int = 0,
        device_index: _int = 0,
        stream_ptr: _int = 0,
    ) -> _CudaStreamBase: ...
    def query(self) -> _bool: ...
    def synchronize(self) -> None: ...
    def priority_range(self) -> Tuple[_int, _int]: ...
# Defined in torch/csrc/cuda/Event.cpp
# Low-level binding for a CUDA event.
class _CudaEventBase:
    device: _device
    cuda_event: _int  # underlying event handle exposed as an integer
    def __new__(
        cls,
        enable_timing: _bool = False,
        blocking: _bool = False,
        interprocess: _bool = False,
    ) -> _CudaEventBase: ...
    @classmethod
    def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ...
    def record(self, stream: _CudaStreamBase) -> None: ...
    def wait(self, stream: _CudaStreamBase) -> None: ...
    def query(self) -> _bool: ...
    def elapsed_time(self, other: _CudaEventBase) -> _float: ...
    def synchronize(self) -> None: ...
    def ipc_handle(self) -> bytes: ...
# Defined in torch/csrc/cuda/Graph.cpp
# Binding for CUDA graph capture/replay (backs torch.cuda.CUDAGraph).
class _CUDAGraph:
    def capture_begin(self, pool: Optional[Tuple[_int, _int]] = ..., capture_error_mode: str = "global") -> None: ...
    def capture_end(self) -> None: ...
    # NOTE(review): the parameter is named `Generator` (no annotation) in the
    # binding; it presumably receives a torch.Generator — verify before renaming.
    def register_generator_state(self, Generator) -> None: ...
    def replay(self) -> None: ...
    def reset(self) -> None: ...
    def pool(self) -> Tuple[_int, _int]: ...
    def enable_debug_mode(self) -> None: ...
    def debug_dump(self, debug_path: str) -> None: ...
def _cuda_isCurrentStreamCapturing() -> _bool: ...
def _graph_pool_handle() -> Tuple[_int, _int]: ...
# Defined in torch/csrc/xpu/Module.cpp
def _xpu_setDevice(device: _int) -> None: ...
def _xpu_exchangeDevice(device: _int) -> _int: ...
def _xpu_maybeExchangeDevice(device: _int) -> _int: ...
def _xpu_getDevice() -> _int: ...
def _xpu_getDeviceCount() -> _int: ...
def _xpu_init() -> None: ...
def _xpu_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ...
def _xpu_getCurrentStream(device: _int) -> Tuple: ...
def _xpu_getCurrentRawStream(device: _int) -> _int: ...
def _xpu_synchronize(device: _int) -> None: ...
def _xpu_emptyCache() -> None: ...
# Device properties reported by the SYCL runtime for an XPU device.
class _XpuDeviceProperties:
    name: str
    platform_name: str
    vendor: str
    driver_version: str
    version: str
    total_memory: _int
    max_compute_units: _int
    gpu_eu_count: _int
    gpu_subslice_count: _int
    max_work_group_size: _int
    max_num_sub_groups: _int
    sub_group_sizes: List[_int]
    has_fp16: _bool
    has_fp64: _bool
    has_atomic64: _bool
    type: str
# Defined in torch/csrc/xpu/Stream.cpp
# Low-level binding for an XPU (SYCL) stream.
class _XpuStreamBase(Stream):
    stream_id: _int
    device_index: _int
    device_type: _int
    device: _device
    sycl_queue: _int  # underlying SYCL queue handle exposed as an integer
    priority: _int
    def __new__(
        cls,
        priority: _int = 0,
        stream_id: _int = 0,
        device_index: _int = 0,
        device_type: _int = 0,
    ) -> _XpuStreamBase: ...
    def query(self) -> _bool: ...
    def synchronize(self) -> None: ...
    @staticmethod
    def priority_range() -> Tuple: ...
# Defined in torch/csrc/xpu/Event.cpp
# Low-level binding for an XPU (SYCL) event.
class _XpuEventBase:
    device: _device
    sycl_event: _int  # underlying SYCL event handle exposed as an integer
    def __new__(cls, enable_timing: _bool = False) -> _XpuEventBase: ...
    # Fixed: `record` takes the stream to record this event on. It was annotated
    # `_XpuEventBase`, inconsistent with `wait` below and with
    # _CudaEventBase.record(stream: _CudaStreamBase).
    def record(self, stream: _XpuStreamBase) -> None: ...
    def wait(self, stream: _XpuStreamBase) -> None: ...
    def query(self) -> _bool: ...
    def elapsed_time(self, other: _XpuEventBase) -> _float: ...
    def synchronize(self) -> None: ...
# Defined in torch/csrc/DataLoader.cpp
# DataLoader worker-process management (signal handling and PID tracking).
def _set_worker_signal_handlers(
    *arg: Any,
) -> None: ...  # THPModule_setWorkerSignalHandlers
def _set_worker_pids(
    key: _int,
    child_pids: Tuple[_int, ...],
) -> None: ...  # THPModule_setWorkerPIDs
def _remove_worker_pids(loader_id: _int) -> None: ...  # THPModule_removeWorkerPIDs
def _error_if_any_worker_fails() -> None: ...  # THPModule_errorIfAnyWorkerFails
# Defined in torch/csrc/jit/python/python_tracer.cpp
# State object threaded through JIT tracing; owns the graph being built.
class TracingState:
    def push_scope(self, scope_name: str) -> None: ...
    def pop_scope(self) -> None: ...
    def current_scope(self) -> str: ...
    def set_graph(self, graph: Graph) -> None: ...
    def graph(self) -> Graph: ...
def _create_graph_by_tracing(
    func: Callable[..., Any],
    inputs: Any,
    var_name_lookup_fn: Callable[[Tensor], str],
    strict: Any,
    force_outplace: Any,
    self: Any = None,
    # NOTE(review): mutable default is harmless in a stub (never executed), but
    # `= ...` would be the conventional stub spelling.
    argument_names: List[str] = [],
) -> Tuple[Graph, Stack]: ...
def _tracer_warn_use_python() -> None: ...  # annotation added; binding returns nothing
def _get_tracing_state() -> TracingState: ...
# Defined in torch/csrc/jit/python/python_ir.cpp
# Not actually defined in python_ir.cpp, not sure where they are.
class IValue: ...
Stack = List[IValue]  # interpreter stack: a list of boxed values
# Base class of the TorchScript type hierarchy.
class JitType:
    annotation_str: str
    def isSubtypeOf(self, other: JitType) -> _bool: ...
    def with_dtype(self, dtype: _dtype) -> JitType: ...
    def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ...
    def kind(self) -> str: ...
    def scalarType(self) -> Optional[str]: ...
    def getElementType(self) -> JitType: ...
    def dtype(self) -> Optional[_dtype]: ...
# Result of type inference: either a type or a failure reason.
class InferredType:
    def __init__(self, arg: Union[JitType, str]): ...
    def type(self) -> JitType: ...
    def success(self) -> _bool: ...
    def reason(self) -> str: ...
R = TypeVar("R", bound=JitType)  # element-type parameter used by OptionalType below
# Singleton TorchScript types; `get()` returns the shared instance of each.
class AnyType(JitType):
    @staticmethod
    def get() -> AnyType: ...
class NoneType(JitType):
    @staticmethod
    def get() -> NoneType: ...
class BoolType(JitType):
    @staticmethod
    def get() -> BoolType: ...
class FloatType(JitType):
    @staticmethod
    def get() -> FloatType: ...
class ComplexType(JitType):
    @staticmethod
    def get() -> ComplexType: ...
class IntType(JitType):
    @staticmethod
    def get() -> IntType: ...
class SymIntType(JitType):
    @staticmethod
    def get() -> SymIntType: ...
class SymBoolType(JitType):
    @staticmethod
    def get() -> SymBoolType: ...
class NumberType(JitType):
    @staticmethod
    def get() -> NumberType: ...
class StringType(JitType):
    @staticmethod
    def get() -> StringType: ...
class DeviceObjType(JitType):
    @staticmethod
    def get() -> DeviceObjType: ...
class _GeneratorType(JitType):
    @staticmethod
    def get() -> _GeneratorType: ...
class StreamObjType(JitType):
    @staticmethod
    def get() -> StreamObjType: ...
- class ListType(JitType):
- def __init__(self, a: JitType) -> None: ...
- def getElementType(self) -> JitType: ...
- @staticmethod
- def ofInts() -> ListType: ...
- @staticmethod
- def ofTensors() -> ListType: ...
- @staticmethod
- def ofFloats() -> ListType: ...
- @staticmethod
- def ofComplexDoubles() -> ListType: ...
- @staticmethod
- def ofBools() -> ListType: ...
- @staticmethod
- def ofStrings() -> ListType: ...
- class DictType(JitType):
- def __init__(self, key: JitType, value: JitType) -> None: ...
- def getKeyType(self) -> JitType: ...
- def getValueType(self) -> JitType: ...
- class TupleType(JitType):
- def __init__(self, a: List[Optional[JitType]]) -> None: ...
- def elements(self) -> List[JitType]: ...
- class UnionType(JitType):
- def __init__(self, a: List[JitType]) -> None: ...
- class ClassType(JitType):
- def __init__(self, qualified_name: str) -> None: ...
- class InterfaceType(JitType):
- def __init__(self, qualified_name: str) -> None: ...
- def getMethod(self, name: str) -> Optional[FunctionSchema]: ...
- def getMethodNames(self) -> List[str]: ...
# Single-element "wrapper" JIT types: each wraps exactly one contained
# element type, supplied to ``__init__``.
class OptionalType(JitType, Generic[R]):
    """JIT type for ``Optional[T]`` values (``T`` or ``None``)."""

    def __init__(self, a: JitType) -> None: ...
    def getElementType(self) -> JitType: ...
    @staticmethod
    def ofTensor() -> OptionalType: ...

class FutureType(JitType):
    """JIT type for a ``torch.jit.Future`` holding an element type."""

    def __init__(self, a: JitType) -> None: ...
    def getElementType(self) -> JitType: ...

class AwaitType(JitType):
    """JIT type for an awaitable holding an element type."""

    def __init__(self, a: JitType) -> None: ...
    def getElementType(self) -> JitType: ...

class RRefType(JitType):
    """JIT type for a distributed RRef holding an element type."""

    def __init__(self, a: JitType) -> None: ...
class EnumType(JitType):
    """JIT type for a scripted ``enum.Enum``.

    ``enum_names_values`` carries the (name, value) pairs of the enum
    members — element layout is defined by the C++ binding; confirm there.
    """

    def __init__(
        self,
        qualified_name: str,
        value_type: JitType,
        enum_names_values: List[Any],
    ) -> None: ...
class TensorType(JitType):
    """JIT type for tensors, optionally refined with shape/stride/device/dtype.

    Accessors return ``None`` when the corresponding property is unknown
    (unrefined); ``with_sizes`` produces a new, refined type rather than
    mutating in place — presumably, per the C++ implementation.
    """

    @classmethod
    def get(cls) -> TensorType: ...
    @classmethod
    def getInferred(cls) -> TensorType: ...
    def with_sizes(self, other: Optional[List[Optional[_int]]]) -> TensorType: ...
    def sizes(self) -> Optional[List[_int]]: ...
    def varyingSizes(self) -> Optional[List[Optional[_int]]]: ...
    def strides(self) -> Optional[List[_int]]: ...
    def device(self) -> Optional[_device]: ...
    def dim(self) -> _int: ...
    def dtype(self) -> Optional[_dtype]: ...
    @staticmethod
    def create_from_tensor(t: Tensor) -> TensorType: ...
# Defined in torch/csrc/jit/python/python_tree_views.cpp
# AST node views exposed to Python for building/inspecting TorchScript
# parse trees.
class SourceRange: ...
class TreeView: ...

class Ident(TreeView):
    @property
    def name(self) -> str: ...

class ClassDef(TreeView): ...

class Def(TreeView):
    # NOTE: plain method (not a property), unlike ``Ident.name``.
    def name(self) -> Ident: ...

class Decl(TreeView): ...
# One-time module initializers for the distributed subsystems; each returns
# True on success (per the ``_bool`` return annotation).
# Defined in torch/csrc/distributed/rpc/init.cpp
def _rpc_init() -> _bool: ...

# Defined in torch/csrc/distributed/autograd/init.cpp
def _dist_autograd_init() -> _bool: ...

# Defined in torch/csrc/distributed/c10d/init.cpp
def _c10d_init() -> _bool: ...

# Defined in torch/csrc/distributed/rpc/testing/init.cpp
def _faulty_agent_init() -> _bool: ...
def _register_py_class_for_device(device: str, cls: Any) -> None: ...
# Defined in torch/csrc/Module.cpp
def _current_graph_task_id() -> _int: ...
def _current_autograd_node() -> _Node: ...

# NOTE(review): the parameter is an *unannotated* name ``Tensor`` (it shadows
# the Tensor class rather than being typed as one) — kept verbatim because
# renaming/annotating it could break keyword callers; confirm against the
# C++ binding before cleaning this up.
def _dispatch_key_set(Tensor) -> str: ...
# Defined in torch/csrc/Exceptions.cpp
# Exception classes surfaced from the C++ layer; all derive from
# RuntimeError so existing ``except RuntimeError`` handlers keep working.
class OutOfMemoryError(RuntimeError): ...
class _DistError(RuntimeError): ...
class _DistBackendError(RuntimeError): ...
class _DistStoreError(RuntimeError): ...
class _DistNetworkError(RuntimeError): ...
# Defined in torch/csrc/profiler/init.cpp
class CapturedTraceback:
    """Opaque handle to a traceback captured by ``gather_traceback``."""

# Capture the current stack; flags select which frame kinds (Python,
# TorchScript, C++) are recorded.
def gather_traceback(python: _bool, script: _bool, cpp: _bool) -> CapturedTraceback: ...

# Resolve a batch of captured tracebacks into structured frame dicts.
def symbolize_tracebacks(tracebacks: List[CapturedTraceback]) -> List[Dict[str, Any]]: ...
# (De)serialization helpers for mobile (lite interpreter) and JIT modules.
# Return annotations are absent in the original binding declarations and are
# intentionally left off here.
def _load_mobile_module_from_file(filename: str): ...
def _load_mobile_module_from_bytes(bytes_: bytes): ...
def _load_jit_module_from_file(filename: str): ...
def _load_jit_module_from_bytes(bytes_: bytes): ...
def _save_mobile_module(m: LiteScriptModule, filename: str): ...
def _save_jit_module(m: ScriptModule, filename: str, extra_files: Dict[str, Any]): ...
def _save_mobile_module_to_bytes(m: LiteScriptModule) -> bytes: ...
def _save_jit_module_to_bytes(m: ScriptModule, extra_files: Dict[str, Any]) -> bytes: ...
def _get_module_info_from_flatbuffer(data: bytes): ...

# Resolve an overloaded op packet to a concrete overload name for the
# given arguments.
def _jit_resolve_packet(op_name: str, *args, **kwargs) -> str: ...

# Swap the underlying TensorImpl of two tensors in place.
def _swap_tensor_impl(t1: Tensor, t2: Tensor): ...
def _save_pickle(obj: Any) -> bytes: ...
# Defined in torch/csrc/jit/runtime/static/init.cpp
# Static-runtime conversion entry points; accept either a raw Graph or a
# ScriptModule.
def _jit_to_static_module(graph_or_module: Union[Graph, ScriptModule]) -> Any: ...
def _fuse_to_static_module(graph_or_module: Union[Graph, ScriptModule], min_size: _int) -> Any: ...
|