# mypy: allow-untyped-defs
"""Adds docstrings to Tensor functions"""

import torch._C
from torch._C import _add_docstr as add_docstr
from torch._torch_docs import parse_kwargs, reproducibility_notes


def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C.TensorBase, method), docstr)


common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
"""
)
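
# A minimal illustrative sketch (hypothetical helper, never called) of how the
# shared snippets above are spliced into the docstrings below: ``parse_kwargs``
# returns a mapping from argument name to its full reST description, and
# ``str.format`` fills the ``{dtype}``-style placeholders.
def _demo_kwarg_splicing():
    template = """
Keyword args:
    {dtype}
    {device}
"""
    # Unused keys in ``new_common_args`` are simply ignored by ``str.format``.
    return template.format(**new_common_args)
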
add_docstr_all(
    "new_tensor",
    """
new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

.. warning::

    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
    If you have a numpy array and want to avoid a copy, use
    :func:`torch.from_numpy`.

.. warning::

    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
    The equivalents using ``clone()`` and ``detach()`` are recommended.

Args:
    data (array_like): The returned Tensor copies :attr:`data`.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)

""".format(
        **new_common_args
    ),
)
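
# Illustrative sketch (hypothetical helper, never called) of the equivalence
# stated in the warning above: for a tensor ``x``, ``tensor.new_tensor(x)``
# behaves like ``x.clone().detach()``.
def _demo_new_tensor_copies():
    import torch

    x = torch.tensor([1.0, 2.0], requires_grad=True)
    a = x.new_tensor(x)  # copies and detaches from the autograd graph
    b = x.clone().detach()  # the recommended spelling of the same thing
    assert not a.requires_grad and not b.requires_grad
    assert a.data_ptr() != x.data_ptr()  # really a copy, not a view
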
add_docstr_all(
    "new_full",
    """
new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    fill_value (scalar): the number to fill the output tensor with.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_empty",
    """
new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(
        **new_common_args
    ),
)
add_docstr_all(
    "new_empty_strided",
    """
new_empty_strided(size, stride, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    stride (int...): the strides of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(
        **new_common_args
    ),
)
add_docstr_all(
    "new_ones",
    """
new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)

""".format(
        **new_common_args
    ),
)

add_docstr_all(
    "new_zeros",
    """
new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \
pin_memory=False) -> Tensor
"""
    + r"""

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)

""".format(
        **new_common_args
    ),
)
add_docstr_all(
    "abs",
    r"""
abs() -> Tensor

See :func:`torch.abs`
""",
)

add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)

add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)

add_docstr_all(
    "absolute_",
    r"""
absolute_() -> Tensor

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""",
)

add_docstr_all(
    "acos",
    r"""
acos() -> Tensor

See :func:`torch.acos`
""",
)

add_docstr_all(
    "acos_",
    r"""
acos_() -> Tensor

In-place version of :meth:`~Tensor.acos`
""",
)

add_docstr_all(
    "arccos",
    r"""
arccos() -> Tensor

See :func:`torch.arccos`
""",
)

add_docstr_all(
    "arccos_",
    r"""
arccos_() -> Tensor

In-place version of :meth:`~Tensor.arccos`
""",
)

add_docstr_all(
    "acosh",
    r"""
acosh() -> Tensor

See :func:`torch.acosh`
""",
)

add_docstr_all(
    "acosh_",
    r"""
acosh_() -> Tensor

In-place version of :meth:`~Tensor.acosh`
""",
)
add_docstr_all(
    "arccosh",
    r"""
arccosh() -> Tensor

See :func:`torch.arccosh`
""",
)

add_docstr_all(
    "arccosh_",
    r"""
arccosh_() -> Tensor

In-place version of :meth:`~Tensor.arccosh`
""",
)
add_docstr_all(
    "add",
    r"""
add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

See :func:`torch.add`
""",
)
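
# Illustrative sketch (hypothetical helper, never called) of the ``alpha``
# scaling described above: ``t.add(other, alpha=s)`` computes ``t + s * other``.
def _demo_add_alpha():
    import torch

    t = torch.tensor([1.0, 2.0])
    other = torch.tensor([10.0, 20.0])
    out = t.add(other, alpha=0.5)  # t + 0.5 * other
    assert torch.equal(out, torch.tensor([6.0, 12.0]))
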
add_docstr_all(
    "add_",
    r"""
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`
""",
)

add_docstr_all(
    "addbmm",
    r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`
""",
)

add_docstr_all(
    "addbmm_",
    r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`
""",
)

add_docstr_all(
    "addcdiv",
    r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`
""",
)

add_docstr_all(
    "addcdiv_",
    r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`
""",
)

add_docstr_all(
    "addcmul",
    r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`
""",
)

add_docstr_all(
    "addcmul_",
    r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`
""",
)

add_docstr_all(
    "addmm",
    r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`
""",
)

add_docstr_all(
    "addmm_",
    r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`
""",
)

add_docstr_all(
    "addmv",
    r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`
""",
)

add_docstr_all(
    "addmv_",
    r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmv`
""",
)

add_docstr_all(
    "sspaddmm",
    r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.sspaddmm`
""",
)

add_docstr_all(
    "smm",
    r"""
smm(mat) -> Tensor

See :func:`torch.smm`
""",
)

add_docstr_all(
    "addr",
    r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addr`
""",
)

add_docstr_all(
    "addr_",
    r"""
addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addr`
""",
)
add_docstr_all(
    "align_as",
    r"""
align_as(other) -> Tensor

Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.

This operation is useful for explicit broadcasting by names (see examples).

All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.

All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.

To align a tensor to a specific order, use :meth:`~Tensor.align_to`.

Examples::

    # Example 1: Applying a mask
    >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
    >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
    >>> imgs.masked_fill_(mask.align_as(imgs), 0)

    # Example 2: Applying a per-channel-scale
    >>> def scale_channels(input, scale):
    >>>     scale = scale.refine_names('C')
    >>>     return input * scale.align_as(input)

    >>> num_channels = 3
    >>> scale = torch.randn(num_channels, names=('C',))
    >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
    >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
    >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))

    # scale_channels is agnostic to the dimension order of the input
    >>> scale_channels(imgs, scale)
    >>> scale_channels(more_imgs, scale)
    >>> scale_channels(videos, scale)

.. warning::
    The named tensor API is experimental and subject to change.
""",
)
add_docstr_all(
    "all",
    r"""
all(dim=None, keepdim=False) -> Tensor

See :func:`torch.all`
""",
)
add_docstr_all(
    "allclose",
    r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

See :func:`torch.allclose`
""",
)
add_docstr_all(
    "angle",
    r"""
angle() -> Tensor

See :func:`torch.angle`
""",
)

add_docstr_all(
    "any",
    r"""
any(dim=None, keepdim=False) -> Tensor

See :func:`torch.any`
""",
)

add_docstr_all(
    "apply_",
    r"""
apply_(callable) -> Tensor

Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.

.. note::

    This function only works with CPU tensors and should not be used in code
    sections that require high performance.
""",
)
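
# Illustrative sketch (hypothetical helper, never called) of ``apply_``: the
# Python callable runs once per element, which is why the note above warns
# against using it in performance-sensitive code.
def _demo_apply_():
    import torch

    t = torch.tensor([1.0, 2.0, 3.0])
    t.apply_(lambda v: v * 2)  # in-place, element by element, CPU only
    assert torch.equal(t, torch.tensor([2.0, 4.0, 6.0]))
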
add_docstr_all(
    "asin",
    r"""
asin() -> Tensor

See :func:`torch.asin`
""",
)

add_docstr_all(
    "asin_",
    r"""
asin_() -> Tensor

In-place version of :meth:`~Tensor.asin`
""",
)

add_docstr_all(
    "arcsin",
    r"""
arcsin() -> Tensor

See :func:`torch.arcsin`
""",
)

add_docstr_all(
    "arcsin_",
    r"""
arcsin_() -> Tensor

In-place version of :meth:`~Tensor.arcsin`
""",
)

add_docstr_all(
    "asinh",
    r"""
asinh() -> Tensor

See :func:`torch.asinh`
""",
)

add_docstr_all(
    "asinh_",
    r"""
asinh_() -> Tensor

In-place version of :meth:`~Tensor.asinh`
""",
)

add_docstr_all(
    "arcsinh",
    r"""
arcsinh() -> Tensor

See :func:`torch.arcsinh`
""",
)

add_docstr_all(
    "arcsinh_",
    r"""
arcsinh_() -> Tensor

In-place version of :meth:`~Tensor.arcsinh`
""",
)

add_docstr_all(
    "as_strided",
    r"""
as_strided(size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided`
""",
)

add_docstr_all(
    "as_strided_",
    r"""
as_strided_(size, stride, storage_offset=None) -> Tensor

In-place version of :meth:`~Tensor.as_strided`
""",
)

add_docstr_all(
    "atan",
    r"""
atan() -> Tensor

See :func:`torch.atan`
""",
)

add_docstr_all(
    "atan_",
    r"""
atan_() -> Tensor

In-place version of :meth:`~Tensor.atan`
""",
)

add_docstr_all(
    "arctan",
    r"""
arctan() -> Tensor

See :func:`torch.arctan`
""",
)

add_docstr_all(
    "arctan_",
    r"""
arctan_() -> Tensor

In-place version of :meth:`~Tensor.arctan`
""",
)

add_docstr_all(
    "atan2",
    r"""
atan2(other) -> Tensor

See :func:`torch.atan2`
""",
)

add_docstr_all(
    "atan2_",
    r"""
atan2_(other) -> Tensor

In-place version of :meth:`~Tensor.atan2`
""",
)

add_docstr_all(
    "arctan2",
    r"""
arctan2(other) -> Tensor

See :func:`torch.arctan2`
""",
)
add_docstr_all(
    "arctan2_",
    r"""
arctan2_(other) -> Tensor

In-place version of :meth:`~Tensor.arctan2`
""",
)
add_docstr_all(
    "atanh",
    r"""
atanh() -> Tensor

See :func:`torch.atanh`
""",
)
add_docstr_all(
    "atanh_",
    r"""
atanh_() -> Tensor

In-place version of :meth:`~Tensor.atanh`
""",
)
add_docstr_all(
    "arctanh",
    r"""
arctanh() -> Tensor

See :func:`torch.arctanh`
""",
)
add_docstr_all(
    "arctanh_",
    r"""
arctanh_() -> Tensor

In-place version of :meth:`~Tensor.arctanh`
""",
)
add_docstr_all(
    "baddbmm",
    r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.baddbmm`
""",
)

add_docstr_all(
    "baddbmm_",
    r"""
baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.baddbmm`
""",
)

add_docstr_all(
    "bernoulli",
    r"""
bernoulli(*, generator=None) -> Tensor

Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.

See :func:`torch.bernoulli`
""",
)

add_docstr_all(
    "bernoulli_",
    r"""
bernoulli_(p=0.5, *, generator=None) -> Tensor

Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.

:attr:`p` should either be a scalar or tensor containing probabilities to be
used for drawing the binary random number.

If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.

See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
""",
)
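
# Illustrative sketch (hypothetical helper, never called) of the two forms of
# ``p`` described above: a scalar probability shared by all elements, or a
# floating-point tensor of per-element probabilities.
def _demo_bernoulli_():
    import torch

    t = torch.empty(3, dtype=torch.int64)
    t.bernoulli_(0.5)  # scalar p; integral ``self`` is allowed
    u = torch.empty(3)
    u.bernoulli_(torch.tensor([0.0, 0.5, 1.0]))  # per-element floating-point p
    assert u[0] == 0 and u[2] == 1  # p=0 and p=1 are deterministic
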
add_docstr_all(
    "bincount",
    r"""
bincount(weights=None, minlength=0) -> Tensor

See :func:`torch.bincount`
""",
)

add_docstr_all(
    "bitwise_not",
    r"""
bitwise_not() -> Tensor

See :func:`torch.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_not_",
    r"""
bitwise_not_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_not`
""",
)
add_docstr_all(
    "bitwise_and",
    r"""
bitwise_and(other) -> Tensor

See :func:`torch.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_and_",
    r"""
bitwise_and_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_or",
    r"""
bitwise_or(other) -> Tensor

See :func:`torch.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_or_",
    r"""
bitwise_or_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_xor",
    r"""
bitwise_xor(other) -> Tensor

See :func:`torch.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_xor_",
    r"""
bitwise_xor_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_xor`
""",
)
add_docstr_all(
    "bitwise_left_shift",
    r"""
bitwise_left_shift(other) -> Tensor

See :func:`torch.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_left_shift_",
    r"""
bitwise_left_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift",
    r"""
bitwise_right_shift(other) -> Tensor

See :func:`torch.bitwise_right_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift_",
    r"""
bitwise_right_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_right_shift`
""",
)

add_docstr_all(
    "broadcast_to",
    r"""
broadcast_to(shape) -> Tensor

See :func:`torch.broadcast_to`.
""",
)
add_docstr_all(
    "logical_and",
    r"""
logical_and(other) -> Tensor

See :func:`torch.logical_and`
""",
)

add_docstr_all(
    "logical_and_",
    r"""
logical_and_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_and`
""",
)

add_docstr_all(
    "logical_not",
    r"""
logical_not() -> Tensor

See :func:`torch.logical_not`
""",
)

add_docstr_all(
    "logical_not_",
    r"""
logical_not_() -> Tensor

In-place version of :meth:`~Tensor.logical_not`
""",
)

add_docstr_all(
    "logical_or",
    r"""
logical_or(other) -> Tensor

See :func:`torch.logical_or`
""",
)

add_docstr_all(
    "logical_or_",
    r"""
logical_or_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_or`
""",
)

add_docstr_all(
    "logical_xor",
    r"""
logical_xor(other) -> Tensor

See :func:`torch.logical_xor`
""",
)

add_docstr_all(
    "logical_xor_",
    r"""
logical_xor_(other) -> Tensor

In-place version of :meth:`~Tensor.logical_xor`
""",
)
add_docstr_all(
    "bmm",
    r"""
bmm(batch2) -> Tensor

See :func:`torch.bmm`
""",
)

add_docstr_all(
    "cauchy_",
    r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor

Fills the tensor with numbers drawn from the Cauchy distribution:

.. math::

    f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}

.. note::
    Sigma (:math:`\sigma`) denotes the scale parameter of the Cauchy distribution.
""",
)
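
# Illustrative sketch (hypothetical helper, never called) of ``cauchy_``:
# ``median`` shifts the distribution and ``sigma`` is the scale parameter from
# the density above.
def _demo_cauchy_():
    import torch

    t = torch.empty(1000).cauchy_(median=0.0, sigma=2.0)
    # A Cauchy distribution has no finite mean, but the sample median should
    # land near the ``median`` parameter with overwhelming probability.
    assert t.median().abs() < 1.0
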
add_docstr_all(
    "ceil",
    r"""
ceil() -> Tensor

See :func:`torch.ceil`
""",
)

add_docstr_all(
    "ceil_",
    r"""
ceil_() -> Tensor

In-place version of :meth:`~Tensor.ceil`
""",
)

add_docstr_all(
    "cholesky",
    r"""
cholesky(upper=False) -> Tensor

See :func:`torch.cholesky`
""",
)

add_docstr_all(
    "cholesky_solve",
    r"""
cholesky_solve(input2, upper=False) -> Tensor

See :func:`torch.cholesky_solve`
""",
)

add_docstr_all(
    "cholesky_inverse",
    r"""
cholesky_inverse(upper=False) -> Tensor

See :func:`torch.cholesky_inverse`
""",
)

add_docstr_all(
    "clamp",
    r"""
clamp(min=None, max=None) -> Tensor

See :func:`torch.clamp`
""",
)

add_docstr_all(
    "clamp_",
    r"""
clamp_(min=None, max=None) -> Tensor

In-place version of :meth:`~Tensor.clamp`
""",
)

add_docstr_all(
    "clip",
    r"""
clip(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp`.
""",
)

add_docstr_all(
    "clip_",
    r"""
clip_(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp_`.
""",
)

add_docstr_all(
    "clone",
    r"""
clone(*, memory_format=torch.preserve_format) -> Tensor

See :func:`torch.clone`
""".format(
        **common_args
    ),
)

add_docstr_all(
    "coalesce",
    r"""
coalesce() -> Tensor

Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.

Returns :attr:`self` if :attr:`self` is a coalesced tensor.

.. warning::
    Throws an error if :attr:`self` is not a sparse COO tensor.
""",
)
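
# Illustrative sketch (hypothetical helper, never called) of ``coalesce`` on a
# sparse COO tensor: duplicate indices are merged by summing their values.
def _demo_coalesce():
    import torch

    i = torch.tensor([[0, 0, 1]])  # index 0 appears twice
    v = torch.tensor([1.0, 2.0, 3.0])
    s = torch.sparse_coo_tensor(i, v, (2,))
    c = s.coalesce()  # the two entries at index 0 are summed
    assert c.values().tolist() == [3.0, 3.0]
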
add_docstr_all(
    "contiguous",
    r"""
contiguous(memory_format=torch.contiguous_format) -> Tensor

Returns a tensor that is contiguous in memory and contains the same data as the
:attr:`self` tensor. If :attr:`self` is already in the specified memory format,
this function returns the :attr:`self` tensor.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
""",
)

add_docstr_all(
    "copy_",
    r"""
copy_(src, non_blocking=False) -> Tensor

Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.

The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.

Args:
    src (Tensor): the source tensor to copy from
    non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
        the copy may occur asynchronously with respect to the host. For other
        cases, this argument has no effect.
""",
)
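
# Illustrative sketch (hypothetical helper, never called) of ``copy_`` across
# dtypes with broadcasting: ``src`` only has to broadcast to ``self``'s shape.
def _demo_copy_():
    import torch

    dst = torch.zeros(2, 3, dtype=torch.float64)
    src = torch.tensor([1, 2, 3])  # int64, shape (3,), broadcasts to (2, 3)
    dst.copy_(src)  # converts dtype during the copy
    assert dst.dtype == torch.float64 and dst[1, 2] == 3.0
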
add_docstr_all(
    "conj",
    r"""
conj() -> Tensor

See :func:`torch.conj`
""",
)

add_docstr_all(
    "conj_physical",
    r"""
conj_physical() -> Tensor

See :func:`torch.conj_physical`
""",
)

add_docstr_all(
    "conj_physical_",
    r"""
conj_physical_() -> Tensor

In-place version of :meth:`~Tensor.conj_physical`
""",
)

add_docstr_all(
    "resolve_conj",
    r"""
resolve_conj() -> Tensor

See :func:`torch.resolve_conj`
""",
)

add_docstr_all(
    "resolve_neg",
    r"""
resolve_neg() -> Tensor

See :func:`torch.resolve_neg`
""",
)

add_docstr_all(
    "copysign",
    r"""
copysign(other) -> Tensor

See :func:`torch.copysign`
""",
)

add_docstr_all(
    "copysign_",
    r"""
copysign_(other) -> Tensor

In-place version of :meth:`~Tensor.copysign`
""",
)

add_docstr_all(
    "cos",
    r"""
cos() -> Tensor

See :func:`torch.cos`
""",
)

add_docstr_all(
    "cos_",
    r"""
cos_() -> Tensor

In-place version of :meth:`~Tensor.cos`
""",
)

add_docstr_all(
    "cosh",
    r"""
cosh() -> Tensor

See :func:`torch.cosh`
""",
)

add_docstr_all(
    "cosh_",
    r"""
cosh_() -> Tensor

In-place version of :meth:`~Tensor.cosh`
""",
)

add_docstr_all(
    "cpu",
    r"""
cpu(memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CPU memory.

If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    {memory_format}

""".format(
        **common_args
    ),
)
add_docstr_all(
    "count_nonzero",
    r"""
count_nonzero(dim=None) -> Tensor

See :func:`torch.count_nonzero`
""",
)

add_docstr_all(
    "cov",
    r"""
cov(*, correction=1, fweights=None, aweights=None) -> Tensor

See :func:`torch.cov`
""",
)

add_docstr_all(
    "corrcoef",
    r"""
corrcoef() -> Tensor

See :func:`torch.corrcoef`
""",
)

add_docstr_all(
    "cross",
    r"""
cross(other, dim=None) -> Tensor

See :func:`torch.cross`
""",
)

add_docstr_all(
    "cuda",
    r"""
cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CUDA memory.

If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination GPU device.
        Defaults to the current CUDA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}

""".format(
        **common_args
    ),
)
  1099. add_docstr_all(
  1100. "ipu",
  1101. r"""
  1102. ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
  1103. Returns a copy of this object in IPU memory.
  1104. If this object is already in IPU memory and on the correct device,
  1105. then no copy is performed and the original object is returned.
  1106. Args:
  1107. device (:class:`torch.device`): The destination IPU device.
  1108. Defaults to the current IPU device.
  1109. non_blocking (bool): If ``True`` and the source is in pinned memory,
  1110. the copy will be asynchronous with respect to the host.
  1111. Otherwise, the argument has no effect. Default: ``False``.
  1112. {memory_format}
  1113. """.format(
  1114. **common_args
  1115. ),
  1116. )
  1117. add_docstr_all(
  1118. "xpu",
  1119. r"""
  1120. xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
  1121. Returns a copy of this object in XPU memory.
  1122. If this object is already in XPU memory and on the correct device,
  1123. then no copy is performed and the original object is returned.
  1124. Args:
  1125. device (:class:`torch.device`): The destination XPU device.
  1126. Defaults to the current XPU device.
  1127. non_blocking (bool): If ``True`` and the source is in pinned memory,
  1128. the copy will be asynchronous with respect to the host.
  1129. Otherwise, the argument has no effect. Default: ``False``.
  1130. {memory_format}
  1131. """.format(
  1132. **common_args
  1133. ),
  1134. )
  1135. add_docstr_all(
  1136. "logcumsumexp",
  1137. r"""
  1138. logcumsumexp(dim) -> Tensor
  1139. See :func:`torch.logcumsumexp`
  1140. """,
  1141. )
  1142. add_docstr_all(
  1143. "cummax",
  1144. r"""
  1145. cummax(dim) -> (Tensor, Tensor)
  1146. See :func:`torch.cummax`
  1147. """,
  1148. )
  1149. add_docstr_all(
  1150. "cummin",
  1151. r"""
  1152. cummin(dim) -> (Tensor, Tensor)
  1153. See :func:`torch.cummin`
  1154. """,
  1155. )
  1156. add_docstr_all(
  1157. "cumprod",
  1158. r"""
  1159. cumprod(dim, dtype=None) -> Tensor
  1160. See :func:`torch.cumprod`
  1161. """,
  1162. )
  1163. add_docstr_all(
  1164. "cumprod_",
  1165. r"""
  1166. cumprod_(dim, dtype=None) -> Tensor
  1167. In-place version of :meth:`~Tensor.cumprod`
  1168. """,
  1169. )
  1170. add_docstr_all(
  1171. "cumsum",
  1172. r"""
  1173. cumsum(dim, dtype=None) -> Tensor
  1174. See :func:`torch.cumsum`
  1175. """,
  1176. )
  1177. add_docstr_all(
  1178. "cumsum_",
  1179. r"""
  1180. cumsum_(dim, dtype=None) -> Tensor
  1181. In-place version of :meth:`~Tensor.cumsum`
  1182. """,
  1183. )
  1184. add_docstr_all(
  1185. "data_ptr",
  1186. r"""
  1187. data_ptr() -> int
  1188. Returns the address of the first element of :attr:`self` tensor.
  1189. """,
  1190. )
  1191. add_docstr_all(
  1192. "dequantize",
  1193. r"""
  1194. dequantize() -> Tensor
  1195. Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
  1196. """,
  1197. )
  1198. add_docstr_all(
  1199. "dense_dim",
  1200. r"""
  1201. dense_dim() -> int
  1202. Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
  1203. .. note::
  1204. Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.
  1205. See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
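For example, for a hybrid COO tensor with one sparse and one dense dimension (an illustrative construction)::
>>> s = torch.sparse_coo_tensor(torch.tensor([[0]]), torch.ones(1, 2), (3, 2))
>>> s.sparse_dim()
1
>>> s.dense_dim()
1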
  1206. """,
  1207. )
  1208. add_docstr_all(
  1209. "diag",
  1210. r"""
  1211. diag(diagonal=0) -> Tensor
  1212. See :func:`torch.diag`
  1213. """,
  1214. )
  1215. add_docstr_all(
  1216. "diag_embed",
  1217. r"""
  1218. diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
  1219. See :func:`torch.diag_embed`
  1220. """,
  1221. )
  1222. add_docstr_all(
  1223. "diagflat",
  1224. r"""
  1225. diagflat(offset=0) -> Tensor
  1226. See :func:`torch.diagflat`
  1227. """,
  1228. )
  1229. add_docstr_all(
  1230. "diagonal",
  1231. r"""
  1232. diagonal(offset=0, dim1=0, dim2=1) -> Tensor
  1233. See :func:`torch.diagonal`
  1234. """,
  1235. )
  1236. add_docstr_all(
  1237. "diagonal_scatter",
  1238. r"""
  1239. diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
  1240. See :func:`torch.diagonal_scatter`
  1241. """,
  1242. )
  1243. add_docstr_all(
  1244. "as_strided_scatter",
  1245. r"""
  1246. as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor
  1247. See :func:`torch.as_strided_scatter`
  1248. """,
  1249. )
  1250. add_docstr_all(
  1251. "fill_diagonal_",
  1252. r"""
  1253. fill_diagonal_(fill_value, wrap=False) -> Tensor
Fills the main diagonal of a tensor that has at least 2 dimensions.
When dims > 2, all dimensions of the input must be of equal length.
  1256. This function modifies the input tensor in-place, and returns the input tensor.
  1257. Arguments:
  1258. fill_value (Scalar): the fill value
wrap (bool): if ``True``, the diagonal is 'wrapped' again after N columns for tall matrices. Default: ``False``.
  1260. Example::
  1261. >>> a = torch.zeros(3, 3)
  1262. >>> a.fill_diagonal_(5)
  1263. tensor([[5., 0., 0.],
  1264. [0., 5., 0.],
  1265. [0., 0., 5.]])
  1266. >>> b = torch.zeros(7, 3)
  1267. >>> b.fill_diagonal_(5)
  1268. tensor([[5., 0., 0.],
  1269. [0., 5., 0.],
  1270. [0., 0., 5.],
  1271. [0., 0., 0.],
  1272. [0., 0., 0.],
  1273. [0., 0., 0.],
  1274. [0., 0., 0.]])
  1275. >>> c = torch.zeros(7, 3)
  1276. >>> c.fill_diagonal_(5, wrap=True)
  1277. tensor([[5., 0., 0.],
  1278. [0., 5., 0.],
  1279. [0., 0., 5.],
  1280. [0., 0., 0.],
  1281. [5., 0., 0.],
  1282. [0., 5., 0.],
  1283. [0., 0., 5.]])
  1284. """,
  1285. )
  1286. add_docstr_all(
  1287. "floor_divide",
  1288. r"""
  1289. floor_divide(value) -> Tensor
  1290. See :func:`torch.floor_divide`
  1291. """,
  1292. )
  1293. add_docstr_all(
  1294. "floor_divide_",
  1295. r"""
  1296. floor_divide_(value) -> Tensor
  1297. In-place version of :meth:`~Tensor.floor_divide`
  1298. """,
  1299. )
  1300. add_docstr_all(
  1301. "diff",
  1302. r"""
  1303. diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
  1304. See :func:`torch.diff`
  1305. """,
  1306. )
  1307. add_docstr_all(
  1308. "digamma",
  1309. r"""
  1310. digamma() -> Tensor
  1311. See :func:`torch.digamma`
  1312. """,
  1313. )
  1314. add_docstr_all(
  1315. "digamma_",
  1316. r"""
  1317. digamma_() -> Tensor
  1318. In-place version of :meth:`~Tensor.digamma`
  1319. """,
  1320. )
  1321. add_docstr_all(
  1322. "dim",
  1323. r"""
  1324. dim() -> int
  1325. Returns the number of dimensions of :attr:`self` tensor.
  1326. """,
  1327. )
  1328. add_docstr_all(
  1329. "dist",
  1330. r"""
  1331. dist(other, p=2) -> Tensor
  1332. See :func:`torch.dist`
  1333. """,
  1334. )
  1335. add_docstr_all(
  1336. "div",
  1337. r"""
  1338. div(value, *, rounding_mode=None) -> Tensor
  1339. See :func:`torch.div`
  1340. """,
  1341. )
  1342. add_docstr_all(
  1343. "div_",
  1344. r"""
  1345. div_(value, *, rounding_mode=None) -> Tensor
  1346. In-place version of :meth:`~Tensor.div`
  1347. """,
  1348. )
  1349. add_docstr_all(
  1350. "divide",
  1351. r"""
  1352. divide(value, *, rounding_mode=None) -> Tensor
  1353. See :func:`torch.divide`
  1354. """,
  1355. )
  1356. add_docstr_all(
  1357. "divide_",
  1358. r"""
  1359. divide_(value, *, rounding_mode=None) -> Tensor
  1360. In-place version of :meth:`~Tensor.divide`
  1361. """,
  1362. )
  1363. add_docstr_all(
  1364. "dot",
  1365. r"""
  1366. dot(other) -> Tensor
  1367. See :func:`torch.dot`
  1368. """,
  1369. )
  1370. add_docstr_all(
  1371. "element_size",
  1372. r"""
  1373. element_size() -> int
  1374. Returns the size in bytes of an individual element.
  1375. Example::
  1376. >>> torch.tensor([]).element_size()
  1377. 4
  1378. >>> torch.tensor([], dtype=torch.uint8).element_size()
  1379. 1
  1380. """,
  1381. )
  1382. add_docstr_all(
  1383. "eq",
  1384. r"""
  1385. eq(other) -> Tensor
  1386. See :func:`torch.eq`
  1387. """,
  1388. )
  1389. add_docstr_all(
  1390. "eq_",
  1391. r"""
  1392. eq_(other) -> Tensor
  1393. In-place version of :meth:`~Tensor.eq`
  1394. """,
  1395. )
  1396. add_docstr_all(
  1397. "equal",
  1398. r"""
  1399. equal(other) -> bool
  1400. See :func:`torch.equal`
  1401. """,
  1402. )
  1403. add_docstr_all(
  1404. "erf",
  1405. r"""
  1406. erf() -> Tensor
  1407. See :func:`torch.erf`
  1408. """,
  1409. )
  1410. add_docstr_all(
  1411. "erf_",
  1412. r"""
  1413. erf_() -> Tensor
  1414. In-place version of :meth:`~Tensor.erf`
  1415. """,
  1416. )
  1417. add_docstr_all(
  1418. "erfc",
  1419. r"""
  1420. erfc() -> Tensor
  1421. See :func:`torch.erfc`
  1422. """,
  1423. )
  1424. add_docstr_all(
  1425. "erfc_",
  1426. r"""
  1427. erfc_() -> Tensor
  1428. In-place version of :meth:`~Tensor.erfc`
  1429. """,
  1430. )
  1431. add_docstr_all(
  1432. "erfinv",
  1433. r"""
  1434. erfinv() -> Tensor
  1435. See :func:`torch.erfinv`
  1436. """,
  1437. )
  1438. add_docstr_all(
  1439. "erfinv_",
  1440. r"""
  1441. erfinv_() -> Tensor
  1442. In-place version of :meth:`~Tensor.erfinv`
  1443. """,
  1444. )
  1445. add_docstr_all(
  1446. "exp",
  1447. r"""
  1448. exp() -> Tensor
  1449. See :func:`torch.exp`
  1450. """,
  1451. )
  1452. add_docstr_all(
  1453. "exp_",
  1454. r"""
  1455. exp_() -> Tensor
  1456. In-place version of :meth:`~Tensor.exp`
  1457. """,
  1458. )
  1459. add_docstr_all(
  1460. "exp2",
  1461. r"""
  1462. exp2() -> Tensor
  1463. See :func:`torch.exp2`
  1464. """,
  1465. )
  1466. add_docstr_all(
  1467. "exp2_",
  1468. r"""
  1469. exp2_() -> Tensor
  1470. In-place version of :meth:`~Tensor.exp2`
  1471. """,
  1472. )
  1473. add_docstr_all(
  1474. "expm1",
  1475. r"""
  1476. expm1() -> Tensor
  1477. See :func:`torch.expm1`
  1478. """,
  1479. )
  1480. add_docstr_all(
  1481. "expm1_",
  1482. r"""
  1483. expm1_() -> Tensor
  1484. In-place version of :meth:`~Tensor.expm1`
  1485. """,
  1486. )
  1487. add_docstr_all(
  1488. "exponential_",
  1489. r"""
  1490. exponential_(lambd=1, *, generator=None) -> Tensor
  1491. Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):
  1492. .. math::
  1493. f(x) = \lambda e^{-\lambda x}, x > 0
  1494. .. note::
In probability theory, the exponential distribution is supported on the interval :math:`[0, \infty)` (i.e., :math:`x \geq 0`),
implying that zero can be sampled from the exponential distribution.
However, :func:`torch.Tensor.exponential_` does not sample zero,
which means that its actual support is the interval :math:`(0, \infty)`.
Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval :math:`[0, \infty)` and can sample zero.
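Example (drawn values are random, but are always strictly positive)::
>>> t = torch.empty(3).exponential_(lambd=0.5)
>>> bool((t > 0).all())
True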
  1500. """,
  1501. )
  1502. add_docstr_all(
  1503. "fill_",
  1504. r"""
  1505. fill_(value) -> Tensor
  1506. Fills :attr:`self` tensor with the specified value.
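Example::
>>> torch.zeros(2, 2).fill_(3.5)
tensor([[3.5000, 3.5000],
[3.5000, 3.5000]])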
  1507. """,
  1508. )
  1509. add_docstr_all(
  1510. "floor",
  1511. r"""
  1512. floor() -> Tensor
  1513. See :func:`torch.floor`
  1514. """,
  1515. )
  1516. add_docstr_all(
  1517. "flip",
  1518. r"""
  1519. flip(dims) -> Tensor
  1520. See :func:`torch.flip`
  1521. """,
  1522. )
  1523. add_docstr_all(
  1524. "fliplr",
  1525. r"""
  1526. fliplr() -> Tensor
  1527. See :func:`torch.fliplr`
  1528. """,
  1529. )
  1530. add_docstr_all(
  1531. "flipud",
  1532. r"""
  1533. flipud() -> Tensor
  1534. See :func:`torch.flipud`
  1535. """,
  1536. )
  1537. add_docstr_all(
  1538. "roll",
  1539. r"""
  1540. roll(shifts, dims) -> Tensor
  1541. See :func:`torch.roll`
  1542. """,
  1543. )
  1544. add_docstr_all(
  1545. "floor_",
  1546. r"""
  1547. floor_() -> Tensor
  1548. In-place version of :meth:`~Tensor.floor`
  1549. """,
  1550. )
  1551. add_docstr_all(
  1552. "fmod",
  1553. r"""
  1554. fmod(divisor) -> Tensor
  1555. See :func:`torch.fmod`
  1556. """,
  1557. )
  1558. add_docstr_all(
  1559. "fmod_",
  1560. r"""
  1561. fmod_(divisor) -> Tensor
  1562. In-place version of :meth:`~Tensor.fmod`
  1563. """,
  1564. )
  1565. add_docstr_all(
  1566. "frac",
  1567. r"""
  1568. frac() -> Tensor
  1569. See :func:`torch.frac`
  1570. """,
  1571. )
  1572. add_docstr_all(
  1573. "frac_",
  1574. r"""
  1575. frac_() -> Tensor
  1576. In-place version of :meth:`~Tensor.frac`
  1577. """,
  1578. )
  1579. add_docstr_all(
  1580. "frexp",
  1581. r"""
frexp() -> (Tensor mantissa, Tensor exponent)
  1583. See :func:`torch.frexp`
  1584. """,
  1585. )
  1586. add_docstr_all(
  1587. "flatten",
  1588. r"""
  1589. flatten(start_dim=0, end_dim=-1) -> Tensor
  1590. See :func:`torch.flatten`
  1591. """,
  1592. )
  1593. add_docstr_all(
  1594. "gather",
  1595. r"""
  1596. gather(dim, index) -> Tensor
  1597. See :func:`torch.gather`
  1598. """,
  1599. )
  1600. add_docstr_all(
  1601. "gcd",
  1602. r"""
  1603. gcd(other) -> Tensor
  1604. See :func:`torch.gcd`
  1605. """,
  1606. )
  1607. add_docstr_all(
  1608. "gcd_",
  1609. r"""
  1610. gcd_(other) -> Tensor
  1611. In-place version of :meth:`~Tensor.gcd`
  1612. """,
  1613. )
  1614. add_docstr_all(
  1615. "ge",
  1616. r"""
  1617. ge(other) -> Tensor
  1618. See :func:`torch.ge`.
  1619. """,
  1620. )
  1621. add_docstr_all(
  1622. "ge_",
  1623. r"""
  1624. ge_(other) -> Tensor
  1625. In-place version of :meth:`~Tensor.ge`.
  1626. """,
  1627. )
  1628. add_docstr_all(
  1629. "greater_equal",
  1630. r"""
  1631. greater_equal(other) -> Tensor
  1632. See :func:`torch.greater_equal`.
  1633. """,
  1634. )
  1635. add_docstr_all(
  1636. "greater_equal_",
  1637. r"""
  1638. greater_equal_(other) -> Tensor
  1639. In-place version of :meth:`~Tensor.greater_equal`.
  1640. """,
  1641. )
  1642. add_docstr_all(
  1643. "geometric_",
  1644. r"""
  1645. geometric_(p, *, generator=None) -> Tensor
  1646. Fills :attr:`self` tensor with elements drawn from the geometric distribution:
  1647. .. math::
  1648. P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...
  1649. .. note::
For :func:`torch.Tensor.geometric_`, the `k`-th trial is the first success, hence it draws samples in :math:`\{1, 2, \ldots\}`, whereas
for :func:`torch.distributions.geometric.Geometric`, the :math:`(k+1)`-th trial is the first success,
hence it draws samples in :math:`\{0, 1, \ldots\}`.
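Example (drawn values are random, but always lie in :math:`\{1, 2, \ldots\}`)::
>>> t = torch.empty(4).geometric_(0.5)
>>> bool((t >= 1).all())
True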
  1653. """,
  1654. )
  1655. add_docstr_all(
  1656. "geqrf",
  1657. r"""
  1658. geqrf() -> (Tensor, Tensor)
  1659. See :func:`torch.geqrf`
  1660. """,
  1661. )
  1662. add_docstr_all(
  1663. "ger",
  1664. r"""
  1665. ger(vec2) -> Tensor
  1666. See :func:`torch.ger`
  1667. """,
  1668. )
  1669. add_docstr_all(
  1670. "inner",
  1671. r"""
  1672. inner(other) -> Tensor
  1673. See :func:`torch.inner`.
  1674. """,
  1675. )
  1676. add_docstr_all(
  1677. "outer",
  1678. r"""
  1679. outer(vec2) -> Tensor
  1680. See :func:`torch.outer`.
  1681. """,
  1682. )
  1683. add_docstr_all(
  1684. "hypot",
  1685. r"""
  1686. hypot(other) -> Tensor
  1687. See :func:`torch.hypot`
  1688. """,
  1689. )
  1690. add_docstr_all(
  1691. "hypot_",
  1692. r"""
  1693. hypot_(other) -> Tensor
  1694. In-place version of :meth:`~Tensor.hypot`
  1695. """,
  1696. )
  1697. add_docstr_all(
  1698. "i0",
  1699. r"""
  1700. i0() -> Tensor
  1701. See :func:`torch.i0`
  1702. """,
  1703. )
  1704. add_docstr_all(
  1705. "i0_",
  1706. r"""
  1707. i0_() -> Tensor
  1708. In-place version of :meth:`~Tensor.i0`
  1709. """,
  1710. )
  1711. add_docstr_all(
  1712. "igamma",
  1713. r"""
  1714. igamma(other) -> Tensor
  1715. See :func:`torch.igamma`
  1716. """,
  1717. )
  1718. add_docstr_all(
  1719. "igamma_",
  1720. r"""
  1721. igamma_(other) -> Tensor
  1722. In-place version of :meth:`~Tensor.igamma`
  1723. """,
  1724. )
  1725. add_docstr_all(
  1726. "igammac",
  1727. r"""
  1728. igammac(other) -> Tensor
  1729. See :func:`torch.igammac`
  1730. """,
  1731. )
  1732. add_docstr_all(
  1733. "igammac_",
  1734. r"""
  1735. igammac_(other) -> Tensor
  1736. In-place version of :meth:`~Tensor.igammac`
  1737. """,
  1738. )
  1739. add_docstr_all(
  1740. "indices",
  1741. r"""
  1742. indices() -> Tensor
  1743. Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
  1744. .. warning::
  1745. Throws an error if :attr:`self` is not a sparse COO tensor.
  1746. See also :meth:`Tensor.values`.
  1747. .. note::
  1748. This method can only be called on a coalesced sparse tensor. See
  1749. :meth:`Tensor.coalesce` for details.
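Example::
>>> s = torch.sparse_coo_tensor(torch.tensor([[0, 2]]), torch.tensor([3., 4.]), (5,)).coalesce()
>>> s.indices()
tensor([[0, 2]])
>>> s.values()
tensor([3., 4.])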
  1750. """,
  1751. )
  1752. add_docstr_all(
  1753. "get_device",
  1754. r"""
get_device() -> int
  1756. For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
  1757. For CPU tensors, this function returns `-1`.
  1758. Example::
  1759. >>> x = torch.randn(3, 4, 5, device='cuda:0')
  1760. >>> x.get_device()
  1761. 0
  1762. >>> x.cpu().get_device()
  1763. -1
  1764. """,
  1765. )
  1766. add_docstr_all(
  1767. "values",
  1768. r"""
  1769. values() -> Tensor
  1770. Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
  1771. .. warning::
  1772. Throws an error if :attr:`self` is not a sparse COO tensor.
  1773. See also :meth:`Tensor.indices`.
  1774. .. note::
  1775. This method can only be called on a coalesced sparse tensor. See
  1776. :meth:`Tensor.coalesce` for details.
  1777. """,
  1778. )
  1779. add_docstr_all(
  1780. "gt",
  1781. r"""
  1782. gt(other) -> Tensor
  1783. See :func:`torch.gt`.
  1784. """,
  1785. )
  1786. add_docstr_all(
  1787. "gt_",
  1788. r"""
  1789. gt_(other) -> Tensor
  1790. In-place version of :meth:`~Tensor.gt`.
  1791. """,
  1792. )
  1793. add_docstr_all(
  1794. "greater",
  1795. r"""
  1796. greater(other) -> Tensor
  1797. See :func:`torch.greater`.
  1798. """,
  1799. )
  1800. add_docstr_all(
  1801. "greater_",
  1802. r"""
  1803. greater_(other) -> Tensor
  1804. In-place version of :meth:`~Tensor.greater`.
  1805. """,
  1806. )
  1807. add_docstr_all(
  1808. "has_names",
  1809. r"""
  1810. Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
  1811. """,
  1812. )
  1813. add_docstr_all(
  1814. "hardshrink",
  1815. r"""
  1816. hardshrink(lambd=0.5) -> Tensor
  1817. See :func:`torch.nn.functional.hardshrink`
  1818. """,
  1819. )
  1820. add_docstr_all(
  1821. "heaviside",
  1822. r"""
  1823. heaviside(values) -> Tensor
  1824. See :func:`torch.heaviside`
  1825. """,
  1826. )
  1827. add_docstr_all(
  1828. "heaviside_",
  1829. r"""
  1830. heaviside_(values) -> Tensor
  1831. In-place version of :meth:`~Tensor.heaviside`
  1832. """,
  1833. )
  1834. add_docstr_all(
  1835. "histc",
  1836. r"""
  1837. histc(bins=100, min=0, max=0) -> Tensor
  1838. See :func:`torch.histc`
  1839. """,
  1840. )
  1841. add_docstr_all(
  1842. "histogram",
  1843. r"""
histogram(bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
  1845. See :func:`torch.histogram`
  1846. """,
  1847. )
  1848. add_docstr_all(
  1849. "index_add_",
  1850. r"""
  1851. index_add_(dim, index, source, *, alpha=1) -> Tensor
  1852. Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
  1853. tensor by adding to the indices in the order given in :attr:`index`. For example,
  1854. if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
  1855. ``source`` is subtracted from the ``j``\ th row of :attr:`self`.
  1856. The :attr:`dim`\ th dimension of ``source`` must have the same size as the
  1857. length of :attr:`index` (which must be a vector), and all other dimensions must
  1858. match :attr:`self`, or an error will be raised.
  1859. For a 3-D tensor the output is given as::
self[index[i], :, :] += alpha * source[i, :, :]  # if dim == 0
self[:, index[i], :] += alpha * source[:, i, :]  # if dim == 1
self[:, :, index[i]] += alpha * source[:, :, i]  # if dim == 2
  1863. Note:
  1864. {forward_reproducibility_note}
  1865. Args:
  1866. dim (int): dimension along which to index
  1867. index (Tensor): indices of ``source`` to select from,
  1868. should have dtype either `torch.int64` or `torch.int32`
  1869. source (Tensor): the tensor containing values to add
  1870. Keyword args:
  1871. alpha (Number): the scalar multiplier for ``source``
  1872. Example::
  1873. >>> x = torch.ones(5, 3)
  1874. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  1875. >>> index = torch.tensor([0, 4, 2])
  1876. >>> x.index_add_(0, index, t)
  1877. tensor([[ 2., 3., 4.],
  1878. [ 1., 1., 1.],
  1879. [ 8., 9., 10.],
  1880. [ 1., 1., 1.],
  1881. [ 5., 6., 7.]])
  1882. >>> x.index_add_(0, index, t, alpha=-1)
  1883. tensor([[ 1., 1., 1.],
  1884. [ 1., 1., 1.],
  1885. [ 1., 1., 1.],
  1886. [ 1., 1., 1.],
  1887. [ 1., 1., 1.]])
  1888. """.format(
  1889. **reproducibility_notes
  1890. ),
  1891. )
  1892. add_docstr_all(
  1893. "index_copy_",
  1894. r"""
  1895. index_copy_(dim, index, tensor) -> Tensor
  1896. Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
  1897. the indices in the order given in :attr:`index`. For example, if ``dim == 0``
  1898. and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
  1899. ``j``\ th row of :attr:`self`.
  1900. The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
  1901. length of :attr:`index` (which must be a vector), and all other dimensions must
  1902. match :attr:`self`, or an error will be raised.
  1903. .. note::
  1904. If :attr:`index` contains duplicate entries, multiple elements from
  1905. :attr:`tensor` will be copied to the same index of :attr:`self`. The result
  1906. is nondeterministic since it depends on which copy occurs last.
  1907. Args:
  1908. dim (int): dimension along which to index
  1909. index (LongTensor): indices of :attr:`tensor` to select from
  1910. tensor (Tensor): the tensor containing values to copy
  1911. Example::
  1912. >>> x = torch.zeros(5, 3)
  1913. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  1914. >>> index = torch.tensor([0, 4, 2])
  1915. >>> x.index_copy_(0, index, t)
  1916. tensor([[ 1., 2., 3.],
  1917. [ 0., 0., 0.],
  1918. [ 7., 8., 9.],
  1919. [ 0., 0., 0.],
  1920. [ 4., 5., 6.]])
  1921. """,
  1922. )
  1923. add_docstr_all(
  1924. "index_fill_",
  1925. r"""
  1926. index_fill_(dim, index, value) -> Tensor
  1927. Fills the elements of the :attr:`self` tensor with value :attr:`value` by
  1928. selecting the indices in the order given in :attr:`index`.
  1929. Args:
  1930. dim (int): dimension along which to index
  1931. index (LongTensor): indices of :attr:`self` tensor to fill in
  1932. value (float): the value to fill with
  1933. Example::
  1934. >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
  1935. >>> index = torch.tensor([0, 2])
  1936. >>> x.index_fill_(1, index, -1)
  1937. tensor([[-1., 2., -1.],
  1938. [-1., 5., -1.],
  1939. [-1., 8., -1.]])
  1940. """,
  1941. )
  1942. add_docstr_all(
  1943. "index_put_",
  1944. r"""
  1945. index_put_(indices, values, accumulate=False) -> Tensor
  1946. Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
  1947. the indices specified in :attr:`indices` (which is a tuple of Tensors). The
  1948. expression ``tensor.index_put_(indices, values)`` is equivalent to
  1949. ``tensor[indices] = values``. Returns :attr:`self`.
  1950. If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
  1951. :attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
  1952. contain duplicate elements.
  1953. Args:
  1954. indices (tuple of LongTensor): tensors used to index into `self`.
  1955. values (Tensor): tensor of same dtype as `self`.
  1956. accumulate (bool): whether to accumulate into self
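For example, indexing with a tuple of row and column index tensors::
>>> x = torch.zeros(3, 3)
>>> x.index_put_((torch.tensor([0, 2]), torch.tensor([1, 1])), torch.tensor([5., 7.]))
tensor([[0., 5., 0.],
[0., 0., 0.],
[0., 7., 0.]])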
  1957. """,
  1958. )
  1959. add_docstr_all(
  1960. "index_put",
  1961. r"""
  1962. index_put(indices, values, accumulate=False) -> Tensor
Out-of-place version of :meth:`~Tensor.index_put_`.
  1964. """,
  1965. )
  1966. add_docstr_all(
  1967. "index_reduce_",
  1968. r"""
  1969. index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor
  1970. Accumulate the elements of ``source`` into the :attr:`self`
  1971. tensor by accumulating to the indices in the order given in :attr:`index`
  1972. using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
``index[i] == j``, ``reduce == prod`` and ``include_self == True``, then the ``i``\ th
row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction; otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.
  1978. The :attr:`dim`\ th dimension of ``source`` must have the same size as the
  1979. length of :attr:`index` (which must be a vector), and all other dimensions must
  1980. match :attr:`self`, or an error will be raised.
  1981. For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
  1982. output is given as::
self[index[i], :, :] *= source[i, :, :]  # if dim == 0
self[:, index[i], :] *= source[:, i, :]  # if dim == 1
self[:, :, index[i]] *= source[:, :, i]  # if dim == 2
  1986. Note:
  1987. {forward_reproducibility_note}
  1988. .. note::
  1989. This function only supports floating point tensors.
  1990. .. warning::
  1991. This function is in beta and may change in the near future.
  1992. Args:
  1993. dim (int): dimension along which to index
  1994. index (Tensor): indices of ``source`` to select from,
  1995. should have dtype either `torch.int64` or `torch.int32`
  1996. source (FloatTensor): the tensor containing values to accumulate
  1997. reduce (str): the reduction operation to apply
  1998. (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
  1999. Keyword args:
  2000. include_self (bool): whether the elements from the ``self`` tensor are
  2001. included in the reduction
  2002. Example::
  2003. >>> x = torch.empty(5, 3).fill_(2)
  2004. >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
  2005. >>> index = torch.tensor([0, 4, 2, 0])
  2006. >>> x.index_reduce_(0, index, t, 'prod')
  2007. tensor([[20., 44., 72.],
  2008. [ 2., 2., 2.],
  2009. [14., 16., 18.],
  2010. [ 2., 2., 2.],
  2011. [ 8., 10., 12.]])
  2012. >>> x = torch.empty(5, 3).fill_(2)
  2013. >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
  2014. tensor([[10., 22., 36.],
  2015. [ 2., 2., 2.],
  2016. [ 7., 8., 9.],
  2017. [ 2., 2., 2.],
  2018. [ 4., 5., 6.]])
  2019. """.format(
  2020. **reproducibility_notes
  2021. ),
  2022. )
  2023. add_docstr_all(
  2024. "index_select",
  2025. r"""
  2026. index_select(dim, index) -> Tensor
  2027. See :func:`torch.index_select`
  2028. """,
  2029. )
  2030. add_docstr_all(
  2031. "sparse_mask",
  2032. r"""
  2033. sparse_mask(mask) -> Tensor
  2034. Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
  2035. strided tensor :attr:`self` filtered by the indices of the sparse
  2036. tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
  2037. ignored. :attr:`self` and :attr:`mask` tensors must have the same
  2038. shape.
  2039. .. note::
  2040. The returned sparse tensor might contain duplicate values if :attr:`mask`
  2041. is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
  2042. if such behavior is not desired.
  2043. .. note::
  2044. The returned sparse tensor has the same indices as the sparse tensor
  2045. :attr:`mask`, even when the corresponding values in :attr:`self` are
  2046. zeros.
  2047. Args:
  2048. mask (Tensor): a sparse tensor whose indices are used as a filter
  2049. Example::
  2050. >>> nse = 5
  2051. >>> dims = (5, 5, 2, 2)
  2052. >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
  2053. ... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
  2054. >>> V = torch.randn(nse, dims[2], dims[3])
  2055. >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
  2056. >>> D = torch.randn(dims)
  2057. >>> D.sparse_mask(S)
  2058. tensor(indices=tensor([[0, 0, 0, 2],
  2059. [0, 1, 4, 3]]),
  2060. values=tensor([[[ 1.6550, 0.2397],
  2061. [-0.1611, -0.0779]],
  2062. [[ 0.2326, -1.0558],
  2063. [ 1.4711, 1.9678]],
  2064. [[-0.5138, -0.0411],
  2065. [ 1.9417, 0.5158]],
  2066. [[ 0.0793, 0.0036],
  2067. [-0.2569, -0.1055]]]),
  2068. size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
  2069. """,
  2070. )
  2071. add_docstr_all(
  2072. "inverse",
  2073. r"""
  2074. inverse() -> Tensor
  2075. See :func:`torch.inverse`
  2076. """,
  2077. )
  2078. add_docstr_all(
  2079. "isnan",
  2080. r"""
  2081. isnan() -> Tensor
  2082. See :func:`torch.isnan`
  2083. """,
  2084. )
  2085. add_docstr_all(
  2086. "isinf",
  2087. r"""
  2088. isinf() -> Tensor
  2089. See :func:`torch.isinf`
  2090. """,
  2091. )
  2092. add_docstr_all(
  2093. "isposinf",
  2094. r"""
  2095. isposinf() -> Tensor
  2096. See :func:`torch.isposinf`
  2097. """,
  2098. )
  2099. add_docstr_all(
  2100. "isneginf",
  2101. r"""
  2102. isneginf() -> Tensor
  2103. See :func:`torch.isneginf`
  2104. """,
  2105. )
  2106. add_docstr_all(
  2107. "isfinite",
  2108. r"""
  2109. isfinite() -> Tensor
  2110. See :func:`torch.isfinite`
  2111. """,
  2112. )
  2113. add_docstr_all(
  2114. "isclose",
  2115. r"""
  2116. isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
  2117. See :func:`torch.isclose`
  2118. """,
  2119. )
  2120. add_docstr_all(
  2121. "isreal",
  2122. r"""
  2123. isreal() -> Tensor
  2124. See :func:`torch.isreal`
  2125. """,
  2126. )
  2127. add_docstr_all(
  2128. "is_coalesced",
  2129. r"""
  2130. is_coalesced() -> bool
  2131. Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
  2132. <sparse-coo-docs>` that is coalesced, ``False`` otherwise.
  2133. .. warning::
  2134. Throws an error if :attr:`self` is not a sparse COO tensor.
  2135. See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
  2136. """,
  2137. )
  2138. add_docstr_all(
  2139. "is_contiguous",
  2140. r"""
  2141. is_contiguous(memory_format=torch.contiguous_format) -> bool
  2142. Returns True if :attr:`self` tensor is contiguous in memory in the order specified
  2143. by memory format.
  2144. Args:
  2145. memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
  2146. order. Default: ``torch.contiguous_format``.
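Example::
>>> x = torch.arange(6).reshape(2, 3)
>>> x.is_contiguous()
True
>>> x.t().is_contiguous()
False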
  2147. """,
  2148. )
  2149. add_docstr_all(
  2150. "is_pinned",
  2151. r"""
Returns True if this tensor resides in pinned memory.
  2153. """,
  2154. )
  2155. add_docstr_all(
  2156. "is_floating_point",
  2157. r"""
  2158. is_floating_point() -> bool
  2159. Returns True if the data type of :attr:`self` is a floating point data type.
  2160. """,
  2161. )
  2162. add_docstr_all(
  2163. "is_complex",
  2164. r"""
  2165. is_complex() -> bool
  2166. Returns True if the data type of :attr:`self` is a complex data type.
  2167. """,
  2168. )
  2169. add_docstr_all(
  2170. "is_inference",
  2171. r"""
  2172. is_inference() -> bool
  2173. See :func:`torch.is_inference`
  2174. """,
  2175. )
  2176. add_docstr_all(
  2177. "is_conj",
  2178. r"""
  2179. is_conj() -> bool
  2180. Returns True if the conjugate bit of :attr:`self` is set to true.
  2181. """,
  2182. )
  2183. add_docstr_all(
  2184. "is_neg",
  2185. r"""
  2186. is_neg() -> bool
  2187. Returns True if the negative bit of :attr:`self` is set to true.
  2188. """,
  2189. )
  2190. add_docstr_all(
  2191. "is_signed",
  2192. r"""
  2193. is_signed() -> bool
  2194. Returns True if the data type of :attr:`self` is a signed data type.
  2195. """,
  2196. )
  2197. add_docstr_all(
  2198. "is_set_to",
  2199. r"""
  2200. is_set_to(tensor) -> bool
  2201. Returns True if both tensors are pointing to the exact same memory (same
  2202. storage, offset, size and stride).
  2203. """,
  2204. )
  2205. add_docstr_all(
  2206. "item",
  2207. r"""
  2208. item() -> number
  2209. Returns the value of this tensor as a standard Python number. This only works
  2210. for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
  2211. This operation is not differentiable.
  2212. Example::
  2213. >>> x = torch.tensor([1.0])
  2214. >>> x.item()
  2215. 1.0
  2216. """,
  2217. )
  2218. add_docstr_all(
  2219. "kron",
  2220. r"""
  2221. kron(other) -> Tensor
  2222. See :func:`torch.kron`
  2223. """,
  2224. )
  2225. add_docstr_all(
  2226. "kthvalue",
  2227. r"""
  2228. kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
  2229. See :func:`torch.kthvalue`
  2230. """,
  2231. )
  2232. add_docstr_all(
  2233. "ldexp",
  2234. r"""
  2235. ldexp(other) -> Tensor
  2236. See :func:`torch.ldexp`
  2237. """,
  2238. )
  2239. add_docstr_all(
  2240. "ldexp_",
  2241. r"""
  2242. ldexp_(other) -> Tensor
  2243. In-place version of :meth:`~Tensor.ldexp`
  2244. """,
  2245. )
  2246. add_docstr_all(
  2247. "lcm",
  2248. r"""
  2249. lcm(other) -> Tensor
  2250. See :func:`torch.lcm`
  2251. """,
  2252. )
  2253. add_docstr_all(
  2254. "lcm_",
  2255. r"""
  2256. lcm_(other) -> Tensor
  2257. In-place version of :meth:`~Tensor.lcm`
  2258. """,
  2259. )
  2260. add_docstr_all(
  2261. "le",
  2262. r"""
  2263. le(other) -> Tensor
  2264. See :func:`torch.le`.
  2265. """,
  2266. )
  2267. add_docstr_all(
  2268. "le_",
  2269. r"""
  2270. le_(other) -> Tensor
  2271. In-place version of :meth:`~Tensor.le`.
  2272. """,
  2273. )
  2274. add_docstr_all(
  2275. "less_equal",
  2276. r"""
  2277. less_equal(other) -> Tensor
  2278. See :func:`torch.less_equal`.
  2279. """,
  2280. )
  2281. add_docstr_all(
  2282. "less_equal_",
  2283. r"""
  2284. less_equal_(other) -> Tensor
  2285. In-place version of :meth:`~Tensor.less_equal`.
  2286. """,
  2287. )
  2288. add_docstr_all(
  2289. "lerp",
  2290. r"""
  2291. lerp(end, weight) -> Tensor
  2292. See :func:`torch.lerp`
  2293. """,
  2294. )
  2295. add_docstr_all(
  2296. "lerp_",
  2297. r"""
  2298. lerp_(end, weight) -> Tensor
  2299. In-place version of :meth:`~Tensor.lerp`
  2300. """,
  2301. )
  2302. add_docstr_all(
  2303. "lgamma",
  2304. r"""
  2305. lgamma() -> Tensor
  2306. See :func:`torch.lgamma`
  2307. """,
  2308. )
  2309. add_docstr_all(
  2310. "lgamma_",
  2311. r"""
  2312. lgamma_() -> Tensor
  2313. In-place version of :meth:`~Tensor.lgamma`
  2314. """,
  2315. )
  2316. add_docstr_all(
  2317. "log",
  2318. r"""
  2319. log() -> Tensor
  2320. See :func:`torch.log`
  2321. """,
  2322. )
  2323. add_docstr_all(
  2324. "log_",
  2325. r"""
  2326. log_() -> Tensor
  2327. In-place version of :meth:`~Tensor.log`
  2328. """,
  2329. )
  2330. add_docstr_all(
  2331. "log10",
  2332. r"""
  2333. log10() -> Tensor
  2334. See :func:`torch.log10`
  2335. """,
  2336. )
  2337. add_docstr_all(
  2338. "log10_",
  2339. r"""
  2340. log10_() -> Tensor
  2341. In-place version of :meth:`~Tensor.log10`
  2342. """,
  2343. )
  2344. add_docstr_all(
  2345. "log1p",
  2346. r"""
  2347. log1p() -> Tensor
  2348. See :func:`torch.log1p`
  2349. """,
  2350. )
  2351. add_docstr_all(
  2352. "log1p_",
  2353. r"""
  2354. log1p_() -> Tensor
  2355. In-place version of :meth:`~Tensor.log1p`
  2356. """,
  2357. )
  2358. add_docstr_all(
  2359. "log2",
  2360. r"""
  2361. log2() -> Tensor
  2362. See :func:`torch.log2`
  2363. """,
  2364. )
  2365. add_docstr_all(
  2366. "log2_",
  2367. r"""
  2368. log2_() -> Tensor
  2369. In-place version of :meth:`~Tensor.log2`
  2370. """,
  2371. )
  2372. add_docstr_all(
  2373. "logaddexp",
  2374. r"""
  2375. logaddexp(other) -> Tensor
  2376. See :func:`torch.logaddexp`
  2377. """,
  2378. )
  2379. add_docstr_all(
  2380. "logaddexp2",
  2381. r"""
  2382. logaddexp2(other) -> Tensor
  2383. See :func:`torch.logaddexp2`
  2384. """,
  2385. )
  2386. add_docstr_all(
  2387. "log_normal_",
  2388. r"""
log_normal_(mean=1, std=2, *, generator=None) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
  2391. parameterized by the given mean :math:`\mu` and standard deviation
  2392. :math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
  2393. standard deviation of the underlying normal distribution, and not of the
  2394. returned distribution:
  2395. .. math::
  2396. f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
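Example (drawn values are random, but are always positive)::
>>> t = torch.empty(3).log_normal_(mean=0, std=0.5)
>>> bool((t > 0).all())
True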
  2397. """,
  2398. )
  2399. add_docstr_all(
  2400. "logsumexp",
  2401. r"""
  2402. logsumexp(dim, keepdim=False) -> Tensor
  2403. See :func:`torch.logsumexp`
  2404. """,
  2405. )
  2406. add_docstr_all(
  2407. "lt",
  2408. r"""
  2409. lt(other) -> Tensor
  2410. See :func:`torch.lt`.
  2411. """,
  2412. )
  2413. add_docstr_all(
  2414. "lt_",
  2415. r"""
  2416. lt_(other) -> Tensor
  2417. In-place version of :meth:`~Tensor.lt`.
  2418. """,
  2419. )
  2420. add_docstr_all(
  2421. "less",
  2422. r"""
less(other) -> Tensor
  2424. See :func:`torch.less`.
  2425. """,
  2426. )
  2427. add_docstr_all(
  2428. "less_",
  2429. r"""
  2430. less_(other) -> Tensor
  2431. In-place version of :meth:`~Tensor.less`.
  2432. """,
  2433. )
  2434. add_docstr_all(
  2435. "lu_solve",
  2436. r"""
  2437. lu_solve(LU_data, LU_pivots) -> Tensor
  2438. See :func:`torch.lu_solve`
  2439. """,
  2440. )
  2441. add_docstr_all(
  2442. "map_",
  2443. r"""
  2444. map_(tensor, callable)
Applies :attr:`callable` to each element in :attr:`self` tensor and the given
  2446. :attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
  2447. the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
  2448. The :attr:`callable` should have the signature::
  2449. def callable(a, b) -> number
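Example::
>>> a = torch.tensor([1., 2., 3.])
>>> b = torch.tensor([10., 20., 30.])
>>> a.map_(b, lambda x, y: x + y)
tensor([11., 22., 33.])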
  2450. """,
  2451. )
  2452. add_docstr_all(
  2453. "masked_scatter_",
  2454. r"""
  2455. masked_scatter_(mask, source)
  2456. Copies elements from :attr:`source` into :attr:`self` tensor at positions where
  2457. the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
  2458. starting at position 0 of :attr:`source` and continuing in order one-by-one for each
  2459. occurrence of :attr:`mask` being True.
  2460. The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
  2461. with the shape of the underlying tensor. The :attr:`source` should have at least
  2462. as many elements as the number of ones in :attr:`mask`.
  2463. Args:
  2464. mask (BoolTensor): the boolean mask
  2465. source (Tensor): the tensor to copy from
  2466. .. note::
  2467. The :attr:`mask` operates on the :attr:`self` tensor, not on the given
  2468. :attr:`source` tensor.
Example::
  2470. >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
  2471. >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
  2472. >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
  2473. >>> self.masked_scatter_(mask, source)
  2474. tensor([[0, 0, 0, 0, 1],
  2475. [2, 3, 0, 4, 5]])
  2476. """,
  2477. )
  2478. add_docstr_all(
  2479. "masked_fill_",
  2480. r"""
  2481. masked_fill_(mask, value)
  2482. Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
  2483. True. The shape of :attr:`mask` must be
  2484. :ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
  2485. tensor.
  2486. Args:
  2487. mask (BoolTensor): the boolean mask
  2488. value (float): the value to fill in with
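Example::
>>> x = torch.tensor([1., 2., 3.])
>>> x.masked_fill_(torch.tensor([True, False, True]), 0.)
tensor([0., 2., 0.])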
  2489. """,
  2490. )
  2491. add_docstr_all(
  2492. "masked_select",
  2493. r"""
  2494. masked_select(mask) -> Tensor
  2495. See :func:`torch.masked_select`
  2496. """,
  2497. )
  2498. add_docstr_all(
  2499. "matrix_power",
  2500. r"""
  2501. matrix_power(n) -> Tensor
  2502. .. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
  2503. Alias for :func:`torch.linalg.matrix_power`
  2504. """,
  2505. )
  2506. add_docstr_all(
  2507. "matrix_exp",
  2508. r"""
  2509. matrix_exp() -> Tensor
  2510. See :func:`torch.matrix_exp`
  2511. """,
  2512. )
  2513. add_docstr_all(
  2514. "max",
  2515. r"""
  2516. max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  2517. See :func:`torch.max`
  2518. """,
  2519. )
  2520. add_docstr_all(
  2521. "amax",
  2522. r"""
  2523. amax(dim=None, keepdim=False) -> Tensor
  2524. See :func:`torch.amax`
  2525. """,
  2526. )
  2527. add_docstr_all(
  2528. "maximum",
  2529. r"""
  2530. maximum(other) -> Tensor
  2531. See :func:`torch.maximum`
  2532. """,
  2533. )
  2534. add_docstr_all(
  2535. "fmax",
  2536. r"""
  2537. fmax(other) -> Tensor
  2538. See :func:`torch.fmax`
  2539. """,
  2540. )
  2541. add_docstr_all(
  2542. "argmax",
  2543. r"""
  2544. argmax(dim=None, keepdim=False) -> LongTensor
  2545. See :func:`torch.argmax`
  2546. """,
  2547. )
  2548. add_docstr_all(
  2549. "argwhere",
  2550. r"""
  2551. argwhere() -> Tensor
  2552. See :func:`torch.argwhere`
  2553. """,
  2554. )
  2555. add_docstr_all(
  2556. "mean",
  2557. r"""
  2558. mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  2559. See :func:`torch.mean`
  2560. """,
  2561. )
  2562. add_docstr_all(
  2563. "nanmean",
  2564. r"""
  2565. nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
  2566. See :func:`torch.nanmean`
  2567. """,
  2568. )
  2569. add_docstr_all(
  2570. "median",
  2571. r"""
  2572. median(dim=None, keepdim=False) -> (Tensor, LongTensor)
  2573. See :func:`torch.median`
  2574. """,
  2575. )
  2576. add_docstr_all(
  2577. "nanmedian",
  2578. r"""
  2579. nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
  2580. See :func:`torch.nanmedian`
  2581. """,
  2582. )
  2583. add_docstr_all(
  2584. "min",
  2585. r"""
  2586. min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
  2587. See :func:`torch.min`
  2588. """,
  2589. )
  2590. add_docstr_all(
  2591. "amin",
  2592. r"""
  2593. amin(dim=None, keepdim=False) -> Tensor
  2594. See :func:`torch.amin`
  2595. """,
  2596. )
  2597. add_docstr_all(
  2598. "minimum",
  2599. r"""
  2600. minimum(other) -> Tensor
  2601. See :func:`torch.minimum`
  2602. """,
  2603. )
  2604. add_docstr_all(
  2605. "aminmax",
  2606. r"""
  2607. aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
  2608. See :func:`torch.aminmax`
  2609. """,
  2610. )
  2611. add_docstr_all(
  2612. "fmin",
  2613. r"""
  2614. fmin(other) -> Tensor
  2615. See :func:`torch.fmin`
  2616. """,
  2617. )
  2618. add_docstr_all(
  2619. "argmin",
  2620. r"""
  2621. argmin(dim=None, keepdim=False) -> LongTensor
  2622. See :func:`torch.argmin`
  2623. """,
  2624. )
  2625. add_docstr_all(
  2626. "mm",
  2627. r"""
  2628. mm(mat2) -> Tensor
  2629. See :func:`torch.mm`
  2630. """,
  2631. )
  2632. add_docstr_all(
  2633. "mode",
  2634. r"""
  2635. mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
  2636. See :func:`torch.mode`
  2637. """,
  2638. )
  2639. add_docstr_all(
  2640. "movedim",
  2641. r"""
  2642. movedim(source, destination) -> Tensor
  2643. See :func:`torch.movedim`
  2644. """,
  2645. )
  2646. add_docstr_all(
  2647. "moveaxis",
  2648. r"""
  2649. moveaxis(source, destination) -> Tensor
  2650. See :func:`torch.moveaxis`
  2651. """,
  2652. )
  2653. add_docstr_all(
  2654. "mul",
  2655. r"""
  2656. mul(value) -> Tensor
  2657. See :func:`torch.mul`.
  2658. """,
  2659. )
  2660. add_docstr_all(
  2661. "mul_",
  2662. r"""
  2663. mul_(value) -> Tensor
  2664. In-place version of :meth:`~Tensor.mul`.
  2665. """,
  2666. )
  2667. add_docstr_all(
  2668. "multiply",
  2669. r"""
  2670. multiply(value) -> Tensor
  2671. See :func:`torch.multiply`.
  2672. """,
  2673. )
  2674. add_docstr_all(
  2675. "multiply_",
  2676. r"""
  2677. multiply_(value) -> Tensor
  2678. In-place version of :meth:`~Tensor.multiply`.
  2679. """,
  2680. )
  2681. add_docstr_all(
  2682. "multinomial",
  2683. r"""
  2684. multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
  2685. See :func:`torch.multinomial`
  2686. """,
  2687. )
  2688. add_docstr_all(
  2689. "mv",
  2690. r"""
  2691. mv(vec) -> Tensor
  2692. See :func:`torch.mv`
  2693. """,
  2694. )
  2695. add_docstr_all(
  2696. "mvlgamma",
  2697. r"""
  2698. mvlgamma(p) -> Tensor
  2699. See :func:`torch.mvlgamma`
  2700. """,
  2701. )
  2702. add_docstr_all(
  2703. "mvlgamma_",
  2704. r"""
  2705. mvlgamma_(p) -> Tensor
  2706. In-place version of :meth:`~Tensor.mvlgamma`
  2707. """,
  2708. )
  2709. add_docstr_all(
  2710. "narrow",
  2711. r"""
  2712. narrow(dimension, start, length) -> Tensor
  2713. See :func:`torch.narrow`.
  2714. """,
  2715. )
  2716. add_docstr_all(
  2717. "narrow_copy",
  2718. r"""
  2719. narrow_copy(dimension, start, length) -> Tensor
  2720. See :func:`torch.narrow_copy`.
  2721. """,
  2722. )
  2723. add_docstr_all(
  2724. "ndimension",
  2725. r"""
  2726. ndimension() -> int
Alias for :meth:`~Tensor.dim`
  2728. """,
  2729. )
  2730. add_docstr_all(
  2731. "nan_to_num",
  2732. r"""
  2733. nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
  2734. See :func:`torch.nan_to_num`.
  2735. """,
  2736. )
  2737. add_docstr_all(
  2738. "nan_to_num_",
  2739. r"""
  2740. nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
  2741. In-place version of :meth:`~Tensor.nan_to_num`.
  2742. """,
  2743. )
  2744. add_docstr_all(
  2745. "ne",
  2746. r"""
  2747. ne(other) -> Tensor
  2748. See :func:`torch.ne`.
  2749. """,
  2750. )
  2751. add_docstr_all(
  2752. "ne_",
  2753. r"""
  2754. ne_(other) -> Tensor
  2755. In-place version of :meth:`~Tensor.ne`.
  2756. """,
  2757. )
  2758. add_docstr_all(
  2759. "not_equal",
  2760. r"""
  2761. not_equal(other) -> Tensor
  2762. See :func:`torch.not_equal`.
  2763. """,
  2764. )
  2765. add_docstr_all(
  2766. "not_equal_",
  2767. r"""
  2768. not_equal_(other) -> Tensor
  2769. In-place version of :meth:`~Tensor.not_equal`.
  2770. """,
  2771. )
  2772. add_docstr_all(
  2773. "neg",
  2774. r"""
  2775. neg() -> Tensor
  2776. See :func:`torch.neg`
  2777. """,
  2778. )
  2779. add_docstr_all(
  2780. "negative",
  2781. r"""
  2782. negative() -> Tensor
  2783. See :func:`torch.negative`
  2784. """,
  2785. )
  2786. add_docstr_all(
  2787. "neg_",
  2788. r"""
  2789. neg_() -> Tensor
  2790. In-place version of :meth:`~Tensor.neg`
  2791. """,
  2792. )
  2793. add_docstr_all(
  2794. "negative_",
  2795. r"""
  2796. negative_() -> Tensor
  2797. In-place version of :meth:`~Tensor.negative`
  2798. """,
  2799. )
  2800. add_docstr_all(
  2801. "nelement",
  2802. r"""
  2803. nelement() -> int
  2804. Alias for :meth:`~Tensor.numel`
  2805. """,
  2806. )
  2807. add_docstr_all(
  2808. "nextafter",
  2809. r"""
  2810. nextafter(other) -> Tensor
  2811. See :func:`torch.nextafter`
  2812. """,
  2813. )
  2814. add_docstr_all(
  2815. "nextafter_",
  2816. r"""
  2817. nextafter_(other) -> Tensor
  2818. In-place version of :meth:`~Tensor.nextafter`
  2819. """,
  2820. )
  2821. add_docstr_all(
  2822. "nonzero",
  2823. r"""
  2824. nonzero() -> LongTensor
  2825. See :func:`torch.nonzero`
  2826. """,
  2827. )
  2828. add_docstr_all(
  2829. "nonzero_static",
  2830. r"""
  2831. nonzero_static(input, *, size, fill_value=-1) -> Tensor
  2832. Returns a 2-D tensor where each row is the index for a non-zero value.
  2833. The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.
  2834. Args:
  2835. input (Tensor): the input tensor to count non-zero elements.
  2836. Keyword args:
size (int): the number of non-zero elements expected to be included in the output
tensor. The output tensor is padded with `fill_value` if `size` is larger
than the total number of non-zero elements, and truncated if `size`
is smaller. The size must be a non-negative integer.
  2841. fill_value (int): the value to fill the output tensor with when `size` is larger
  2842. than the total number of non-zero elements. Default is `-1` to represent
  2843. invalid index.
Example::
  2845. # Example 1: Padding
  2846. >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
  2847. >>> static_size = 4
>>> torch.nonzero_static(input_tensor, size=static_size)
  2849. tensor([[ 0, 0],
  2850. [ 1, 0],
  2851. [ 1, 1],
  2852. [ -1, -1]], dtype=torch.int64)
  2853. # Example 2: Truncating
  2854. >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
  2855. >>> static_size = 2
>>> torch.nonzero_static(input_tensor, size=static_size)
  2857. tensor([[ 0, 0],
  2858. [ 1, 0]], dtype=torch.int64)
  2859. # Example 3: 0 size
  2860. >>> input_tensor = torch.tensor([10])
  2861. >>> static_size = 0
>>> torch.nonzero_static(input_tensor, size=static_size)
  2863. tensor([], size=(0, 1), dtype=torch.int64)
  2864. # Example 4: 0 rank input
  2865. >>> input_tensor = torch.tensor(10)
  2866. >>> static_size = 2
>>> torch.nonzero_static(input_tensor, size=static_size)
  2868. tensor([], size=(2, 0), dtype=torch.int64)
  2869. """,
  2870. )
  2871. add_docstr_all(
  2872. "norm",
  2873. r"""
  2874. norm(p=2, dim=None, keepdim=False) -> Tensor
  2875. See :func:`torch.norm`
  2876. """,
  2877. )
  2878. add_docstr_all(
  2879. "normal_",
  2880. r"""
  2881. normal_(mean=0, std=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements sampled from the normal distribution
  2883. parameterized by :attr:`mean` and :attr:`std`.
  2884. """,
  2885. )
  2886. add_docstr_all(
  2887. "numel",
  2888. r"""
  2889. numel() -> int
  2890. See :func:`torch.numel`
  2891. """,
  2892. )
  2893. add_docstr_all(
  2894. "numpy",
  2895. r"""
  2896. numpy(*, force=False) -> numpy.ndarray
  2897. Returns the tensor as a NumPy :class:`ndarray`.
  2898. If :attr:`force` is ``False`` (the default), the conversion
  2899. is performed only if the tensor is on the CPU, does not require grad,
  2900. does not have its conjugate bit set, and is a dtype and layout that
  2901. NumPy supports. The returned ndarray and the tensor will share their
  2902. storage, so changes to the tensor will be reflected in the ndarray
  2903. and vice versa.
  2904. If :attr:`force` is ``True`` this is equivalent to
  2905. calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
  2906. If the tensor isn't on the CPU or the conjugate or negative bit is set,
  2907. the tensor won't share its storage with the returned ndarray.
  2908. Setting :attr:`force` to ``True`` can be a useful shorthand.
  2909. Args:
  2910. force (bool): if ``True``, the ndarray may be a copy of the tensor
  2911. instead of always sharing memory, defaults to ``False``.
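For example, when no copy is needed the returned ndarray shares memory with the tensor::
>>> t = torch.tensor([1, 2, 3])
>>> a = t.numpy()
>>> a[0] = 10
>>> t
tensor([10,  2,  3])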
  2912. """,
  2913. )
  2914. add_docstr_all(
  2915. "orgqr",
  2916. r"""
  2917. orgqr(input2) -> Tensor
  2918. See :func:`torch.orgqr`
  2919. """,
  2920. )
  2921. add_docstr_all(
  2922. "ormqr",
  2923. r"""
  2924. ormqr(input2, input3, left=True, transpose=False) -> Tensor
  2925. See :func:`torch.ormqr`
  2926. """,
  2927. )
  2928. add_docstr_all(
  2929. "permute",
  2930. r"""
  2931. permute(*dims) -> Tensor
  2932. See :func:`torch.permute`
  2933. """,
  2934. )
  2935. add_docstr_all(
  2936. "polygamma",
  2937. r"""
  2938. polygamma(n) -> Tensor
  2939. See :func:`torch.polygamma`
  2940. """,
  2941. )
  2942. add_docstr_all(
  2943. "polygamma_",
  2944. r"""
  2945. polygamma_(n) -> Tensor
  2946. In-place version of :meth:`~Tensor.polygamma`
  2947. """,
  2948. )
  2949. add_docstr_all(
  2950. "positive",
  2951. r"""
  2952. positive() -> Tensor
  2953. See :func:`torch.positive`
  2954. """,
  2955. )
  2956. add_docstr_all(
  2957. "pow",
  2958. r"""
  2959. pow(exponent) -> Tensor
  2960. See :func:`torch.pow`
  2961. """,
  2962. )
  2963. add_docstr_all(
  2964. "pow_",
  2965. r"""
  2966. pow_(exponent) -> Tensor
  2967. In-place version of :meth:`~Tensor.pow`
  2968. """,
  2969. )
  2970. add_docstr_all(
  2971. "float_power",
  2972. r"""
  2973. float_power(exponent) -> Tensor
  2974. See :func:`torch.float_power`
  2975. """,
  2976. )
  2977. add_docstr_all(
  2978. "float_power_",
  2979. r"""
  2980. float_power_(exponent) -> Tensor
  2981. In-place version of :meth:`~Tensor.float_power`
  2982. """,
  2983. )
  2984. add_docstr_all(
  2985. "prod",
  2986. r"""
  2987. prod(dim=None, keepdim=False, dtype=None) -> Tensor
  2988. See :func:`torch.prod`
  2989. """,
  2990. )
  2991. add_docstr_all(
  2992. "put_",
  2993. r"""
  2994. put_(index, source, accumulate=False) -> Tensor
  2995. Copies the elements from :attr:`source` into the positions specified by
  2996. :attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
  2997. it were a 1-D tensor.
  2998. :attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
  2999. the same shape.
  3000. If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
:attr:`self`. If :attr:`accumulate` is ``False``, the behavior is undefined if :attr:`index`
contains duplicate elements.
  3003. Args:
  3004. index (LongTensor): the indices into self
  3005. source (Tensor): the tensor containing values to copy from
  3006. accumulate (bool): whether to accumulate into self
  3007. Example::
  3008. >>> src = torch.tensor([[4, 3, 5],
  3009. ... [6, 7, 8]])
  3010. >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
  3011. tensor([[ 4, 9, 5],
  3012. [ 10, 7, 8]])
  3013. """,
  3014. )
  3015. add_docstr_all(
  3016. "put",
  3017. r"""
  3018. put(input, index, source, accumulate=False) -> Tensor
  3019. Out-of-place version of :meth:`torch.Tensor.put_`.
  3020. `input` corresponds to `self` in :meth:`torch.Tensor.put_`.
  3021. """,
  3022. )
  3023. add_docstr_all(
  3024. "qr",
  3025. r"""
  3026. qr(some=True) -> (Tensor, Tensor)
  3027. See :func:`torch.qr`
  3028. """,
  3029. )
  3030. add_docstr_all(
  3031. "qscheme",
  3032. r"""
  3033. qscheme() -> torch.qscheme
  3034. Returns the quantization scheme of a given QTensor.
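Example::
>>> x = torch.quantize_per_tensor(torch.tensor([1.0]), 0.1, 10, torch.quint8)  # scale=0.1, zero_point=10
>>> x.qscheme()
torch.per_tensor_affine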
  3035. """,
  3036. )
  3037. add_docstr_all(
  3038. "quantile",
  3039. r"""
  3040. quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  3041. See :func:`torch.quantile`
  3042. """,
  3043. )
  3044. add_docstr_all(
  3045. "nanquantile",
  3046. r"""
  3047. nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
  3048. See :func:`torch.nanquantile`
  3049. """,
  3050. )
  3051. add_docstr_all(
  3052. "q_scale",
  3053. r"""
  3054. q_scale() -> float
  3055. Given a Tensor quantized by linear(affine) quantization,
returns the scale of the underlying quantizer.
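Example::
>>> x = torch.quantize_per_tensor(torch.tensor([1.0, 2.0]), 0.1, 10, torch.quint8)  # scale=0.1, zero_point=10
>>> x.q_scale()
0.1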
  3057. """,
  3058. )
  3059. add_docstr_all(
  3060. "q_zero_point",
  3061. r"""
  3062. q_zero_point() -> int
  3063. Given a Tensor quantized by linear(affine) quantization,
returns the zero_point of the underlying quantizer.
  3065. """,
  3066. )
  3067. add_docstr_all(
  3068. "q_per_channel_scales",
  3069. r"""
  3070. q_per_channel_scales() -> Tensor
  3071. Given a Tensor quantized by linear (affine) per-channel quantization,
  3072. returns a Tensor of scales of the underlying quantizer. It has the number of
  3073. elements that matches the corresponding dimensions (from q_per_channel_axis) of
  3074. the tensor.
  3075. """,
  3076. )
  3077. add_docstr_all(
  3078. "q_per_channel_zero_points",
  3079. r"""
  3080. q_per_channel_zero_points() -> Tensor
  3081. Given a Tensor quantized by linear (affine) per-channel quantization,
  3082. returns a tensor of zero_points of the underlying quantizer. It has the number of
  3083. elements that matches the corresponding dimensions (from q_per_channel_axis) of
  3084. the tensor.
  3085. """,
  3086. )
  3087. add_docstr_all(
  3088. "q_per_channel_axis",
  3089. r"""
  3090. q_per_channel_axis() -> int
  3091. Given a Tensor quantized by linear (affine) per-channel quantization,
  3092. returns the index of dimension on which per-channel quantization is applied.
  3093. """,
  3094. )
add_docstr_all(
    "random_",
    r"""
random_(from=0, to=None, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
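
Example (sampled values are random; the output shown below is illustrative
only)::

    >>> torch.empty(3, dtype=torch.int64).random_(0, 10)  # values drawn from [0, 9]
    tensor([7, 1, 4])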
  3105. """,
  3106. )
  3107. add_docstr_all(
  3108. "rad2deg",
  3109. r"""
  3110. rad2deg() -> Tensor
  3111. See :func:`torch.rad2deg`
  3112. """,
  3113. )
  3114. add_docstr_all(
  3115. "rad2deg_",
  3116. r"""
  3117. rad2deg_() -> Tensor
  3118. In-place version of :meth:`~Tensor.rad2deg`
  3119. """,
  3120. )
  3121. add_docstr_all(
  3122. "deg2rad",
  3123. r"""
  3124. deg2rad() -> Tensor
  3125. See :func:`torch.deg2rad`
  3126. """,
  3127. )
  3128. add_docstr_all(
  3129. "deg2rad_",
  3130. r"""
  3131. deg2rad_() -> Tensor
  3132. In-place version of :meth:`~Tensor.deg2rad`
  3133. """,
  3134. )
  3135. add_docstr_all(
  3136. "ravel",
  3137. r"""
  3138. ravel() -> Tensor
  3139. see :func:`torch.ravel`
  3140. """,
  3141. )
add_docstr_all(
    "reciprocal",
    r"""
reciprocal() -> Tensor

See :func:`torch.reciprocal`
""",
)
add_docstr_all(
    "reciprocal_",
    r"""
reciprocal_() -> Tensor

In-place version of :meth:`~Tensor.reciprocal`
""",
)
add_docstr_all(
    "record_stream",
    r"""
record_stream(stream)

Marks the tensor as having been used by this stream. When the tensor
is deallocated, ensures the tensor memory is not reused for another tensor
until all work queued on :attr:`stream` at the time of deallocation is
complete.

.. note::

    The caching allocator is aware of only the stream where a tensor was
    allocated. Due to the awareness, it already correctly manages the life
    cycle of tensors on only one stream. But if a tensor is used on a stream
    different from the stream of origin, the allocator might reuse the memory
    unexpectedly. Calling this method lets the allocator know which streams
    have used the tensor.

.. warning::

    This method is most suitable for use cases where you are providing a
    function that created a tensor on a side stream, and want users to be able
    to make use of the tensor without having to think carefully about stream
    safety when making use of them. These safety guarantees come at some
    performance and predictability cost (analogous to the tradeoff between GC
    and manual memory management), so if you are in a situation where
    you manage the full lifetime of your tensors, you may consider instead
    manually managing CUDA events so that calling this method is not necessary.
    In particular, when you call this method, on later allocations the
    allocator will poll the recorded stream to see if all operations have
    completed yet; you can potentially race with side stream computation and
    non-deterministically reuse or fail to reuse memory for an allocation.

    You can safely use tensors allocated on side streams without
    :meth:`~Tensor.record_stream`; you must manually ensure that
    any non-creation stream uses of a tensor are synced back to the creation
    stream before you deallocate the tensor. As the CUDA caching allocator
    guarantees that the memory will only be reused with the same creation stream,
    this is sufficient to ensure that writes to future reallocations of the
    memory will be delayed until non-creation stream uses are done.
    (Counterintuitively, you may observe that on the CPU side we have already
    reallocated the tensor, even though CUDA kernels on the old tensor are
    still in progress. This is fine, because CUDA operations on the new
    tensor will appropriately wait for the old operations to complete, as they
    are all on the same stream.)

    Concretely, this looks like this::

        with torch.cuda.stream(s0):
            x = torch.zeros(N)

        s1.wait_stream(s0)
        with torch.cuda.stream(s1):
            y = some_comm_op(x)

        ... some compute on s0 ...

        # synchronize creation stream s0 to side stream s1
        # before deallocating x
        s0.wait_stream(s1)
        del x

    Note that some discretion is required when deciding when to perform
    ``s0.wait_stream(s1)``. In particular, if we were to wait immediately
    after ``some_comm_op``, there wouldn't be any point in having the side
    stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
    Instead, the synchronization must be placed at some appropriate, later
    point in time where you expect the side stream ``s1`` to have finished
    work. This location is typically identified via profiling, e.g., using
    Chrome traces produced by
    :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you
    place the wait too early, work on ``s0`` will block until ``s1`` has finished,
    preventing further overlapping of communication and computation. If you
    place the wait too late, you will use more memory than is strictly
    necessary (as you are keeping ``x`` live for longer). For a concrete
    example of how this guidance can be applied in practice, see this post:
    `FSDP and CUDACachingAllocator
    <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
""",
)
add_docstr_all(
    "remainder",
    r"""
remainder(divisor) -> Tensor

See :func:`torch.remainder`
""",
)
add_docstr_all(
    "remainder_",
    r"""
remainder_(divisor) -> Tensor

In-place version of :meth:`~Tensor.remainder`
""",
)
add_docstr_all(
    "renorm",
    r"""
renorm(p, dim, maxnorm) -> Tensor

See :func:`torch.renorm`
""",
)
add_docstr_all(
    "renorm_",
    r"""
renorm_(p, dim, maxnorm) -> Tensor

In-place version of :meth:`~Tensor.renorm`
""",
)
add_docstr_all(
    "repeat",
    r"""
repeat(*sizes) -> Tensor

Repeats this tensor along the specified dimensions.

Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.

.. warning::

    :meth:`~Tensor.repeat` behaves differently from
    `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
    but is more similar to
    `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
    For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.

Args:
    sizes (torch.Size or int...): The number of times to repeat this tensor along each
        dimension

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.repeat(4, 2)
    tensor([[ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3]])
    >>> x.repeat(4, 2, 1).size()
    torch.Size([4, 2, 3])
""",
)
add_docstr_all(
    "repeat_interleave",
    r"""
repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor

See :func:`torch.repeat_interleave`.
""",
)
add_docstr_all(
    "requires_grad_",
    r"""
requires_grad_(requires_grad=True) -> Tensor

Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.

:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.

Args:
    requires_grad (bool): If autograd should record operations on this tensor.
        Default: ``True``.

Example::

    >>> # Let's say we want to preprocess some saved weights and use
    >>> # the result as new weights.
    >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
    >>> loaded_weights = torch.tensor(saved_weights)
    >>> weights = preprocess(loaded_weights)  # some function
    >>> weights
    tensor([-0.5503,  0.4926, -2.1158, -0.8303])
    >>> # Now, start to record operations done to weights
    >>> weights.requires_grad_()
    >>> out = weights.pow(2).sum()
    >>> out.backward()
    >>> weights.grad
    tensor([-1.1007,  0.9853, -4.2316, -1.6606])
""",
)
add_docstr_all(
    "reshape",
    r"""
reshape(*shape) -> Tensor

Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.

See :func:`torch.reshape`

Args:
    shape (tuple of ints or int...): the desired shape
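
A minimal example (mirroring :func:`torch.reshape`)::

    >>> x = torch.arange(4.)
    >>> x.reshape(2, 2)
    tensor([[0., 1.],
            [2., 3.]])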
  3327. """,
  3328. )
  3329. add_docstr_all(
  3330. "reshape_as",
  3331. r"""
  3332. reshape_as(other) -> Tensor
  3333. Returns this tensor as the same shape as :attr:`other`.
  3334. ``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
  3335. This method returns a view if ``other.sizes()`` is compatible with the current
  3336. shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
  3337. Please see :meth:`reshape` for more information about ``reshape``.
  3338. Args:
  3339. other (:class:`torch.Tensor`): The result tensor has the same shape
  3340. as :attr:`other`.
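
A minimal example (only the shape of ``other`` matters, not its contents)::

    >>> a = torch.arange(6)
    >>> b = torch.empty(2, 3)
    >>> a.reshape_as(b)
    tensor([[0, 1, 2],
            [3, 4, 5]])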
  3341. """,
  3342. )
  3343. add_docstr_all(
  3344. "resize_",
  3345. r"""
  3346. resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
  3347. Resizes :attr:`self` tensor to the specified size. If the number of elements is
  3348. larger than the current storage size, then the underlying storage is resized
  3349. to fit the new number of elements. If the number of elements is smaller, the
  3350. underlying storage is not changed. Existing elements are preserved but any new
  3351. memory is uninitialized.
  3352. .. warning::
  3353. This is a low-level method. The storage is reinterpreted as C-contiguous,
  3354. ignoring the current strides (unless the target size equals the current
  3355. size, in which case the tensor is left unchanged). For most purposes, you
  3356. will instead want to use :meth:`~Tensor.view()`, which checks for
  3357. contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
  3358. change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
  3359. .. note::
  3360. If :func:`torch.use_deterministic_algorithms()` and
  3361. :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
  3362. ``True``, new elements are initialized to prevent nondeterministic behavior
  3363. from using the result as an input to an operation. Floating point and
  3364. complex values are set to NaN, and integer values are set to the maximum
  3365. value.
  3366. Args:
  3367. sizes (torch.Size or int...): the desired size
  3368. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3369. Tensor. Default: ``torch.contiguous_format``. Note that memory format of
  3370. :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.
  3371. Example::
  3372. >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
  3373. >>> x.resize_(2, 2)
  3374. tensor([[ 1, 2],
  3375. [ 3, 4]])
  3376. """,
  3377. )
  3378. add_docstr_all(
  3379. "resize_as_",
  3380. r"""
  3381. resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
  3382. Resizes the :attr:`self` tensor to be the same size as the specified
  3383. :attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
  3384. Args:
  3385. memory_format (:class:`torch.memory_format`, optional): the desired memory format of
  3386. Tensor. Default: ``torch.contiguous_format``. Note that memory format of
  3387. :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
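
A minimal example (existing elements are preserved in C-contiguous order)::

    >>> x = torch.tensor([1., 1., 1., 1., 1.])
    >>> x.resize_as_(torch.zeros(2, 2))
    tensor([[1., 1.],
            [1., 1.]])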
  3388. """,
  3389. )
  3390. add_docstr_all(
  3391. "rot90",
  3392. r"""
  3393. rot90(k, dims) -> Tensor
  3394. See :func:`torch.rot90`
  3395. """,
  3396. )
  3397. add_docstr_all(
  3398. "round",
  3399. r"""
  3400. round(decimals=0) -> Tensor
  3401. See :func:`torch.round`
  3402. """,
  3403. )
  3404. add_docstr_all(
  3405. "round_",
  3406. r"""
  3407. round_(decimals=0) -> Tensor
  3408. In-place version of :meth:`~Tensor.round`
  3409. """,
  3410. )
  3411. add_docstr_all(
  3412. "rsqrt",
  3413. r"""
  3414. rsqrt() -> Tensor
  3415. See :func:`torch.rsqrt`
  3416. """,
  3417. )
  3418. add_docstr_all(
  3419. "rsqrt_",
  3420. r"""
  3421. rsqrt_() -> Tensor
  3422. In-place version of :meth:`~Tensor.rsqrt`
  3423. """,
  3424. )
  3425. add_docstr_all(
  3426. "scatter_",
  3427. r"""
  3428. scatter_(dim, index, src, *, reduce=None) -> Tensor
  3429. Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
  3430. specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
  3431. index is specified by its index in :attr:`src` for ``dimension != dim`` and by
  3432. the corresponding value in :attr:`index` for ``dimension = dim``.
  3433. For a 3-D tensor, :attr:`self` is updated as::
  3434. self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
  3435. self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
  3436. self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
  3437. This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
  3438. :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
  3439. the same number of dimensions. It is also required that
  3440. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  3441. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  3442. Note that ``index`` and ``src`` do not broadcast.
  3443. Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
  3444. between ``0`` and ``self.size(dim) - 1`` inclusive.
  3445. .. warning::
  3446. When indices are not unique, the behavior is non-deterministic (one of the
  3447. values from ``src`` will be picked arbitrarily) and the gradient will be
  3448. incorrect (it will be propagated to all locations in the source that
  3449. correspond to the same index)!
  3450. .. note::
  3451. The backward pass is implemented only for ``src.shape == index.shape``.
  3452. Additionally accepts an optional :attr:`reduce` argument that allows
  3453. specification of an optional reduction operation, which is applied to all
  3454. values in the tensor :attr:`src` into :attr:`self` at the indices
  3455. specified in the :attr:`index`. For each value in :attr:`src`, the reduction
  3456. operation is applied to an index in :attr:`self` which is specified by
  3457. its index in :attr:`src` for ``dimension != dim`` and by the corresponding
  3458. value in :attr:`index` for ``dimension = dim``.
  3459. Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
  3460. is updated as::
  3461. self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
  3462. self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
  3463. self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
  3464. Reducing with the addition operation is the same as using
  3465. :meth:`~torch.Tensor.scatter_add_`.
  3466. .. warning::
  3467. The reduce argument with Tensor ``src`` is deprecated and will be removed in
  3468. a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
  3469. instead for more reduction options.
  3470. Args:
  3471. dim (int): the axis along which to index
  3472. index (LongTensor): the indices of elements to scatter, can be either empty
  3473. or of the same dimensionality as ``src``. When empty, the operation
  3474. returns ``self`` unchanged.
  3475. src (Tensor): the source element(s) to scatter.
  3476. Keyword args:
  3477. reduce (str, optional): reduction operation to apply, can be either
  3478. ``'add'`` or ``'multiply'``.
  3479. Example::
  3480. >>> src = torch.arange(1, 11).reshape((2, 5))
  3481. >>> src
  3482. tensor([[ 1, 2, 3, 4, 5],
  3483. [ 6, 7, 8, 9, 10]])
  3484. >>> index = torch.tensor([[0, 1, 2, 0]])
  3485. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
  3486. tensor([[1, 0, 0, 4, 0],
  3487. [0, 2, 0, 0, 0],
  3488. [0, 0, 3, 0, 0]])
  3489. >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
  3490. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
  3491. tensor([[1, 2, 3, 0, 0],
  3492. [6, 7, 0, 0, 8],
  3493. [0, 0, 0, 0, 0]])
  3494. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  3495. ... 1.23, reduce='multiply')
  3496. tensor([[2.0000, 2.0000, 2.4600, 2.0000],
  3497. [2.0000, 2.0000, 2.0000, 2.4600]])
  3498. >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
  3499. ... 1.23, reduce='add')
  3500. tensor([[2.0000, 2.0000, 3.2300, 2.0000],
  3501. [2.0000, 2.0000, 2.0000, 3.2300]])
  3502. .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor:
  3503. :noindex:
  3504. Writes the value from :attr:`value` into :attr:`self` at the indices
  3505. specified in the :attr:`index` tensor. This operation is equivalent to the previous version,
  3506. with the :attr:`src` tensor filled entirely with :attr:`value`.
  3507. Args:
  3508. dim (int): the axis along which to index
  3509. index (LongTensor): the indices of elements to scatter, can be either empty
  3510. or of the same dimensionality as ``src``. When empty, the operation
  3511. returns ``self`` unchanged.
  3512. value (Scalar): the value to scatter.
  3513. Keyword args:
  3514. reduce (str, optional): reduction operation to apply, can be either
  3515. ``'add'`` or ``'multiply'``.
  3516. Example::
  3517. >>> index = torch.tensor([[0, 1]])
  3518. >>> value = 2
  3519. >>> torch.zeros(3, 5).scatter_(0, index, value)
  3520. tensor([[2., 0., 0., 0., 0.],
  3521. [0., 2., 0., 0., 0.],
  3522. [0., 0., 0., 0., 0.]])
  3523. """,
  3524. )
  3525. add_docstr_all(
  3526. "scatter_add_",
  3527. r"""
  3528. scatter_add_(dim, index, src) -> Tensor
  3529. Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
  3530. specified in the :attr:`index` tensor in a similar fashion as
  3531. :meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
  3532. an index in :attr:`self` which is specified by its index in :attr:`src`
  3533. for ``dimension != dim`` and by the corresponding value in :attr:`index` for
  3534. ``dimension = dim``.
  3535. For a 3-D tensor, :attr:`self` is updated as::
  3536. self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
  3537. self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
  3538. self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
  3539. :attr:`self`, :attr:`index` and :attr:`src` should have same number of
  3540. dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
  3541. dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
  3542. ``d != dim``. Note that ``index`` and ``src`` do not broadcast.
  3543. Note:
  3544. {forward_reproducibility_note}
  3545. .. note::
  3546. The backward pass is implemented only for ``src.shape == index.shape``.
  3547. Args:
  3548. dim (int): the axis along which to index
  3549. index (LongTensor): the indices of elements to scatter and add, can be
  3550. either empty or of the same dimensionality as ``src``. When empty, the
  3551. operation returns ``self`` unchanged.
  3552. src (Tensor): the source elements to scatter and add
  3553. Example::
  3554. >>> src = torch.ones((2, 5))
  3555. >>> index = torch.tensor([[0, 1, 2, 0, 0]])
  3556. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
  3557. tensor([[1., 0., 0., 1., 1.],
  3558. [0., 1., 0., 0., 0.],
  3559. [0., 0., 1., 0., 0.]])
  3560. >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
  3561. >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
  3562. tensor([[2., 0., 0., 1., 1.],
  3563. [0., 2., 0., 0., 0.],
  3564. [0., 0., 2., 1., 1.]])
  3565. """.format(
  3566. **reproducibility_notes
  3567. ),
  3568. )
  3569. add_docstr_all(
  3570. "scatter_reduce_",
  3571. r"""
  3572. scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
  3573. Reduces all values from the :attr:`src` tensor to the indices specified in
  3574. the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
  3575. defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
  3576. :obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
  3577. index in :attr:`self` which is specified by its index in :attr:`src` for
  3578. ``dimension != dim`` and by the corresponding value in :attr:`index` for
  3579. ``dimension = dim``. If :obj:`include_self="True"`, the values in the :attr:`self`
  3580. tensor are included in the reduction.
  3581. :attr:`self`, :attr:`index` and :attr:`src` should all have
  3582. the same number of dimensions. It is also required that
  3583. ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
  3584. ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
  3585. Note that ``index`` and ``src`` do not broadcast.
  3586. For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
  3587. output is given as::
  3588. self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
  3589. self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
  3590. self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
  3591. Note:
  3592. {forward_reproducibility_note}
  3593. .. note::
  3594. The backward pass is implemented only for ``src.shape == index.shape``.
  3595. .. warning::
  3596. This function is in beta and may change in the near future.
  3597. Args:
  3598. dim (int): the axis along which to index
  3599. index (LongTensor): the indices of elements to scatter and reduce.
  3600. src (Tensor): the source elements to scatter and reduce
  3601. reduce (str): the reduction operation to apply for non-unique indices
  3602. (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
  3603. include_self (bool): whether elements from the :attr:`self` tensor are
  3604. included in the reduction
  3605. Example::
  3606. >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
  3607. >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
  3608. >>> input = torch.tensor([1., 2., 3., 4.])
  3609. >>> input.scatter_reduce(0, index, src, reduce="sum")
  3610. tensor([5., 14., 8., 4.])
  3611. >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
  3612. tensor([4., 12., 5., 4.])
  3613. >>> input2 = torch.tensor([5., 4., 3., 2.])
  3614. >>> input2.scatter_reduce(0, index, src, reduce="amax")
  3615. tensor([5., 6., 5., 2.])
  3616. >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
  3617. tensor([3., 6., 5., 2.])
  3618. """.format(
  3619. **reproducibility_notes
  3620. ),
  3621. )
  3622. add_docstr_all(
  3623. "select",
  3624. r"""
  3625. select(dim, index) -> Tensor
  3626. See :func:`torch.select`
  3627. """,
  3628. )
  3629. add_docstr_all(
  3630. "select_scatter",
  3631. r"""
  3632. select_scatter(src, dim, index) -> Tensor
  3633. See :func:`torch.select_scatter`
  3634. """,
  3635. )
  3636. add_docstr_all(
  3637. "slice_scatter",
  3638. r"""
  3639. slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
  3640. See :func:`torch.slice_scatter`
  3641. """,
  3642. )
  3643. add_docstr_all(
  3644. "set_",
  3645. r"""
  3646. set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
  3647. Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
  3648. :attr:`self` tensor will share the same storage and have the same size and
  3649. strides as :attr:`source`. Changes to elements in one tensor will be reflected
  3650. in the other.
  3651. If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
  3652. storage, offset, size, and stride.
  3653. Args:
  3654. source (Tensor or Storage): the tensor or storage to use
  3655. storage_offset (int, optional): the offset in the storage
  3656. size (torch.Size, optional): the desired size. Defaults to the size of the source.
  3657. stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
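
A minimal example of storage sharing (illustrative)::

    >>> a = torch.zeros(3)
    >>> b = torch.empty(0)
    >>> b.set_(a)  # b now shares a's storage
    tensor([0., 0., 0.])
    >>> b[0] = 1
    >>> a
    tensor([1., 0., 0.])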
  3658. """,
  3659. )
  3660. add_docstr_all(
  3661. "sigmoid",
  3662. r"""
  3663. sigmoid() -> Tensor
  3664. See :func:`torch.sigmoid`
  3665. """,
  3666. )
  3667. add_docstr_all(
  3668. "sigmoid_",
  3669. r"""
  3670. sigmoid_() -> Tensor
  3671. In-place version of :meth:`~Tensor.sigmoid`
  3672. """,
  3673. )
  3674. add_docstr_all(
  3675. "logit",
  3676. r"""
  3677. logit() -> Tensor
  3678. See :func:`torch.logit`
  3679. """,
  3680. )
  3681. add_docstr_all(
  3682. "logit_",
  3683. r"""
  3684. logit_() -> Tensor
  3685. In-place version of :meth:`~Tensor.logit`
  3686. """,
  3687. )
  3688. add_docstr_all(
  3689. "sign",
  3690. r"""
  3691. sign() -> Tensor
  3692. See :func:`torch.sign`
  3693. """,
  3694. )
  3695. add_docstr_all(
  3696. "sign_",
  3697. r"""
  3698. sign_() -> Tensor
  3699. In-place version of :meth:`~Tensor.sign`
  3700. """,
  3701. )
  3702. add_docstr_all(
  3703. "signbit",
  3704. r"""
  3705. signbit() -> Tensor
  3706. See :func:`torch.signbit`
  3707. """,
  3708. )
  3709. add_docstr_all(
  3710. "sgn",
  3711. r"""
  3712. sgn() -> Tensor
  3713. See :func:`torch.sgn`
  3714. """,
  3715. )
  3716. add_docstr_all(
  3717. "sgn_",
  3718. r"""
  3719. sgn_() -> Tensor
  3720. In-place version of :meth:`~Tensor.sgn`
  3721. """,
  3722. )
  3723. add_docstr_all(
  3724. "sin",
  3725. r"""
  3726. sin() -> Tensor
  3727. See :func:`torch.sin`
  3728. """,
  3729. )
  3730. add_docstr_all(
  3731. "sin_",
  3732. r"""
  3733. sin_() -> Tensor
  3734. In-place version of :meth:`~Tensor.sin`
  3735. """,
  3736. )
  3737. add_docstr_all(
  3738. "sinc",
  3739. r"""
  3740. sinc() -> Tensor
  3741. See :func:`torch.sinc`
  3742. """,
  3743. )
  3744. add_docstr_all(
  3745. "sinc_",
  3746. r"""
  3747. sinc_() -> Tensor
  3748. In-place version of :meth:`~Tensor.sinc`
  3749. """,
  3750. )
  3751. add_docstr_all(
  3752. "sinh",
  3753. r"""
  3754. sinh() -> Tensor
  3755. See :func:`torch.sinh`
  3756. """,
  3757. )
  3758. add_docstr_all(
  3759. "sinh_",
  3760. r"""
  3761. sinh_() -> Tensor
  3762. In-place version of :meth:`~Tensor.sinh`
  3763. """,
  3764. )
  3765. add_docstr_all(
  3766. "size",
  3767. r"""
  3768. size(dim=None) -> torch.Size or int
  3769. Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
  3770. the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
  3771. If ``dim`` is specified, returns an int holding the size of that dimension.
  3772. Args:
  3773. dim (int, optional): The dimension for which to retrieve the size.
  3774. Example::
  3775. >>> t = torch.empty(3, 4, 5)
  3776. >>> t.size()
  3777. torch.Size([3, 4, 5])
  3778. >>> t.size(dim=1)
  3779. 4
  3780. """,
  3781. )
  3782. add_docstr_all(
  3783. "shape",
  3784. r"""
  3785. shape() -> torch.Size
  3786. Returns the size of the :attr:`self` tensor. Alias for :attr:`size`.
  3787. See also :meth:`Tensor.size`.
  3788. Example::
  3789. >>> t = torch.empty(3, 4, 5)
  3790. >>> t.size()
  3791. torch.Size([3, 4, 5])
  3792. >>> t.shape
  3793. torch.Size([3, 4, 5])
  3794. """,
  3795. )
  3796. add_docstr_all(
  3797. "sort",
  3798. r"""
  3799. sort(dim=-1, descending=False) -> (Tensor, LongTensor)
  3800. See :func:`torch.sort`
  3801. """,
  3802. )
  3803. add_docstr_all(
  3804. "msort",
  3805. r"""
  3806. msort() -> Tensor
  3807. See :func:`torch.msort`
  3808. """,
  3809. )
  3810. add_docstr_all(
  3811. "argsort",
  3812. r"""
  3813. argsort(dim=-1, descending=False) -> LongTensor
  3814. See :func:`torch.argsort`
  3815. """,
  3816. )
  3817. add_docstr_all(
  3818. "sparse_dim",
  3819. r"""
  3820. sparse_dim() -> int
  3821. Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
  3822. .. note::
  3823. Returns ``0`` if :attr:`self` is not a sparse tensor.
  3824. See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
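
A minimal example (a 2-D COO tensor has two sparse dimensions)::

    >>> t = torch.sparse_coo_tensor(torch.tensor([[0], [1]]), torch.tensor([1.0]), size=(2, 2))
    >>> t.sparse_dim()
    2
    >>> torch.zeros(2, 2).sparse_dim()
    0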
  3825. """,
  3826. )
  3827. add_docstr_all(
  3828. "sparse_resize_",
  3829. r"""
  3830. sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
  3831. Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
  3832. size and the number of sparse and dense dimensions.
  3833. .. note::
  3834. If the number of specified elements in :attr:`self` is zero, then
  3835. :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
  3836. size and positive integers such that ``len(size) == sparse_dim +
  3837. dense_dim``.
  3838. If :attr:`self` specifies one or more elements, however, then each
  3839. dimension in :attr:`size` must not be smaller than the corresponding
  3840. dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
  3841. of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
  3842. equal the number of dense dimensions in :attr:`self`.
  3843. .. warning::
  3844. Throws an error if :attr:`self` is not a sparse tensor.
  3845. Args:
  3846. size (torch.Size): the desired size. If :attr:`self` is non-empty
  3847. sparse tensor, the desired size cannot be smaller than the
  3848. original size.
  3849. sparse_dim (int): the number of sparse dimensions
  3850. dense_dim (int): the number of dense dimensions
  3851. """,
  3852. )
  3853. add_docstr_all(
  3854. "sparse_resize_and_clear_",
  3855. r"""
  3856. sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
  3857. Removes all specified elements from a :ref:`sparse tensor
  3858. <sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
  3859. size and the number of sparse and dense dimensions.
  3860. .. warning:
  3861. Throws an error if :attr:`self` is not a sparse tensor.
  3862. Args:
  3863. size (torch.Size): the desired size.
  3864. sparse_dim (int): the number of sparse dimensions
  3865. dense_dim (int): the number of dense dimensions
  3866. """,
  3867. )
  3868. add_docstr_all(
  3869. "sqrt",
  3870. r"""
  3871. sqrt() -> Tensor
  3872. See :func:`torch.sqrt`
  3873. """,
  3874. )
  3875. add_docstr_all(
  3876. "sqrt_",
  3877. r"""
  3878. sqrt_() -> Tensor
  3879. In-place version of :meth:`~Tensor.sqrt`
  3880. """,
  3881. )
  3882. add_docstr_all(
  3883. "square",
  3884. r"""
  3885. square() -> Tensor
  3886. See :func:`torch.square`
  3887. """,
  3888. )
  3889. add_docstr_all(
  3890. "square_",
  3891. r"""
  3892. square_() -> Tensor
  3893. In-place version of :meth:`~Tensor.square`
  3894. """,
  3895. )
  3896. add_docstr_all(
  3897. "squeeze",
  3898. r"""
  3899. squeeze(dim=None) -> Tensor
  3900. See :func:`torch.squeeze`
  3901. """,
  3902. )
  3903. add_docstr_all(
  3904. "squeeze_",
  3905. r"""
  3906. squeeze_(dim=None) -> Tensor
  3907. In-place version of :meth:`~Tensor.squeeze`
  3908. """,
  3909. )
  3910. add_docstr_all(
  3911. "std",
  3912. r"""
  3913. std(dim=None, *, correction=1, keepdim=False) -> Tensor
  3914. See :func:`torch.std`
  3915. """,
  3916. )
  3917. add_docstr_all(
  3918. "storage_offset",
  3919. r"""
  3920. storage_offset() -> int
  3921. Returns :attr:`self` tensor's offset in the underlying storage in terms of
  3922. number of storage elements (not bytes).
  3923. Example::
  3924. >>> x = torch.tensor([1, 2, 3, 4, 5])
  3925. >>> x.storage_offset()
  3926. 0
  3927. >>> x[3:].storage_offset()
  3928. 3
  3929. """,
  3930. )
  3931. add_docstr_all(
  3932. "untyped_storage",
  3933. r"""
  3934. untyped_storage() -> torch.UntypedStorage
  3935. Returns the underlying :class:`UntypedStorage`.
  3936. """,
  3937. )
  3938. add_docstr_all(
  3939. "stride",
  3940. r"""
  3941. stride(dim) -> tuple or int
  3942. Returns the stride of :attr:`self` tensor.
  3943. Stride is the jump necessary to go from one element to the next one in the
  3944. specified dimension :attr:`dim`. A tuple of all strides is returned when no
  3945. argument is passed in. Otherwise, an integer value is returned as the stride in
  3946. the particular dimension :attr:`dim`.
  3947. Args:
  3948. dim (int, optional): the desired dimension in which stride is required
  3949. Example::
  3950. >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
  3951. >>> x.stride()
  3952. (5, 1)
  3953. >>> x.stride(0)
  3954. 5
  3955. >>> x.stride(-1)
  3956. 1
  3957. """,
  3958. )
  3959. add_docstr_all(
  3960. "sub",
  3961. r"""
  3962. sub(other, *, alpha=1) -> Tensor
  3963. See :func:`torch.sub`.
  3964. """,
  3965. )
  3966. add_docstr_all(
  3967. "sub_",
  3968. r"""
  3969. sub_(other, *, alpha=1) -> Tensor
  3970. In-place version of :meth:`~Tensor.sub`
  3971. """,
  3972. )
  3973. add_docstr_all(
  3974. "subtract",
  3975. r"""
  3976. subtract(other, *, alpha=1) -> Tensor
  3977. See :func:`torch.subtract`.
  3978. """,
  3979. )
  3980. add_docstr_all(
  3981. "subtract_",
  3982. r"""
  3983. subtract_(other, *, alpha=1) -> Tensor
  3984. In-place version of :meth:`~Tensor.subtract`.
  3985. """,
  3986. )
  3987. add_docstr_all(
  3988. "sum",
  3989. r"""
  3990. sum(dim=None, keepdim=False, dtype=None) -> Tensor
  3991. See :func:`torch.sum`
  3992. """,
  3993. )
  3994. add_docstr_all(
  3995. "nansum",
  3996. r"""
  3997. nansum(dim=None, keepdim=False, dtype=None) -> Tensor
  3998. See :func:`torch.nansum`
  3999. """,
  4000. )
  4001. add_docstr_all(
  4002. "svd",
  4003. r"""
  4004. svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
  4005. See :func:`torch.svd`
  4006. """,
  4007. )
  4008. add_docstr_all(
  4009. "swapdims",
  4010. r"""
  4011. swapdims(dim0, dim1) -> Tensor
  4012. See :func:`torch.swapdims`
  4013. """,
  4014. )
  4015. add_docstr_all(
  4016. "swapdims_",
  4017. r"""
  4018. swapdims_(dim0, dim1) -> Tensor
  4019. In-place version of :meth:`~Tensor.swapdims`
  4020. """,
  4021. )
  4022. add_docstr_all(
  4023. "swapaxes",
  4024. r"""
  4025. swapaxes(axis0, axis1) -> Tensor
  4026. See :func:`torch.swapaxes`
  4027. """,
  4028. )
  4029. add_docstr_all(
  4030. "swapaxes_",
  4031. r"""
  4032. swapaxes_(axis0, axis1) -> Tensor
  4033. In-place version of :meth:`~Tensor.swapaxes`
  4034. """,
  4035. )
  4036. add_docstr_all(
  4037. "t",
  4038. r"""
  4039. t() -> Tensor
  4040. See :func:`torch.t`
  4041. """,
  4042. )
  4043. add_docstr_all(
  4044. "t_",
  4045. r"""
  4046. t_() -> Tensor
  4047. In-place version of :meth:`~Tensor.t`
  4048. """,
  4049. )
  4050. add_docstr_all(
  4051. "tile",
  4052. r"""
  4053. tile(dims) -> Tensor
  4054. See :func:`torch.tile`
  4055. """,
  4056. )
  4057. add_docstr_all(
  4058. "to",
  4059. r"""
  4060. to(*args, **kwargs) -> Tensor
  4061. Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
  4062. inferred from the arguments of ``self.to(*args, **kwargs)``.
  4063. .. note::
  4064. If the ``self`` Tensor already
  4065. has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
  4066. Otherwise, the returned tensor is a copy of ``self`` with the desired
  4067. :class:`torch.dtype` and :class:`torch.device`.
  4068. Here are the ways to call ``to``:
  4069. .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  4070. :noindex:
  4071. Returns a Tensor with the specified :attr:`dtype`
  4072. Args:
  4073. {memory_format}
  4074. .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
  4075. :noindex:
  4076. Returns a Tensor with the specified :attr:`device` and (optional)
  4077. :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
  4078. When :attr:`non_blocking`, tries to convert asynchronously with respect to
  4079. the host if possible, e.g., converting a CPU Tensor with pinned memory to a
  4080. CUDA Tensor.
  4081. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  4082. already matches the desired conversion.
  4083. Args:
  4084. {memory_format}
  4085. .. method:: to(other, non_blocking=False, copy=False) -> Tensor
  4086. :noindex:
  4087. Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
  4088. the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
  4089. asynchronously with respect to the host if possible, e.g., converting a CPU
  4090. Tensor with pinned memory to a CUDA Tensor.
  4091. When :attr:`copy` is set, a new Tensor is created even when the Tensor
  4092. already matches the desired conversion.
  4093. Example::
  4094. >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
  4095. >>> tensor.to(torch.float64)
  4096. tensor([[-0.5044, 0.0005],
  4097. [ 0.3310, -0.0584]], dtype=torch.float64)
  4098. >>> cuda0 = torch.device('cuda:0')
  4099. >>> tensor.to(cuda0)
  4100. tensor([[-0.5044, 0.0005],
  4101. [ 0.3310, -0.0584]], device='cuda:0')
  4102. >>> tensor.to(cuda0, dtype=torch.float64)
  4103. tensor([[-0.5044, 0.0005],
  4104. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  4105. >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
  4106. >>> tensor.to(other, non_blocking=True)
  4107. tensor([[-0.5044, 0.0005],
  4108. [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
  4109. """.format(
  4110. **common_args
  4111. ),
  4112. )
  4113. add_docstr_all(
  4114. "byte",
  4115. r"""
  4116. byte(memory_format=torch.preserve_format) -> Tensor
  4117. ``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
  4118. Args:
  4119. {memory_format}
  4120. """.format(
  4121. **common_args
  4122. ),
  4123. )
  4124. add_docstr_all(
  4125. "bool",
  4126. r"""
  4127. bool(memory_format=torch.preserve_format) -> Tensor
  4128. ``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
  4129. Args:
  4130. {memory_format}
  4131. """.format(
  4132. **common_args
  4133. ),
  4134. )
  4135. add_docstr_all(
  4136. "char",
  4137. r"""
  4138. char(memory_format=torch.preserve_format) -> Tensor
  4139. ``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
  4140. Args:
  4141. {memory_format}
  4142. """.format(
  4143. **common_args
  4144. ),
  4145. )
  4146. add_docstr_all(
  4147. "bfloat16",
  4148. r"""
  4149. bfloat16(memory_format=torch.preserve_format) -> Tensor
  4150. ``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
  4151. Args:
  4152. {memory_format}
  4153. """.format(
  4154. **common_args
  4155. ),
  4156. )
  4157. add_docstr_all(
  4158. "double",
  4159. r"""
  4160. double(memory_format=torch.preserve_format) -> Tensor
  4161. ``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
  4162. Args:
  4163. {memory_format}
  4164. """.format(
  4165. **common_args
  4166. ),
  4167. )
  4168. add_docstr_all(
  4169. "float",
  4170. r"""
  4171. float(memory_format=torch.preserve_format) -> Tensor
  4172. ``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
  4173. Args:
  4174. {memory_format}
  4175. """.format(
  4176. **common_args
  4177. ),
  4178. )
  4179. add_docstr_all(
  4180. "cdouble",
  4181. r"""
  4182. cdouble(memory_format=torch.preserve_format) -> Tensor
  4183. ``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.
  4184. Args:
  4185. {memory_format}
  4186. """.format(
  4187. **common_args
  4188. ),
  4189. )
  4190. add_docstr_all(
  4191. "cfloat",
  4192. r"""
  4193. cfloat(memory_format=torch.preserve_format) -> Tensor
  4194. ``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.
  4195. Args:
  4196. {memory_format}
  4197. """.format(
  4198. **common_args
  4199. ),
  4200. )
  4201. add_docstr_all(
  4202. "chalf",
  4203. r"""
  4204. chalf(memory_format=torch.preserve_format) -> Tensor
  4205. ``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
  4206. Args:
  4207. {memory_format}
  4208. """.format(
  4209. **common_args
  4210. ),
  4211. )
  4212. add_docstr_all(
  4213. "half",
  4214. r"""
  4215. half(memory_format=torch.preserve_format) -> Tensor
  4216. ``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
  4217. Args:
  4218. {memory_format}
  4219. """.format(
  4220. **common_args
  4221. ),
  4222. )
  4223. add_docstr_all(
  4224. "int",
  4225. r"""
  4226. int(memory_format=torch.preserve_format) -> Tensor
  4227. ``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
  4228. Args:
  4229. {memory_format}
  4230. """.format(
  4231. **common_args
  4232. ),
  4233. )
  4234. add_docstr_all(
  4235. "int_repr",
  4236. r"""
  4237. int_repr() -> Tensor
  4238. Given a quantized Tensor,
  4239. ``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the
  4240. underlying uint8_t values of the given Tensor.
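
A small illustrative example (hypothetical scale and zero_point; the integer
representation depends on both)::

    >>> x = torch.quantize_per_tensor(torch.tensor([1.0, 2.0]), scale=1.0, zero_point=0, dtype=torch.quint8)
    >>> x.int_repr()
    tensor([1, 2], dtype=torch.uint8)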
  4241. """,
  4242. )
  4243. add_docstr_all(
  4244. "long",
  4245. r"""
  4246. long(memory_format=torch.preserve_format) -> Tensor
  4247. ``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
  4248. Args:
  4249. {memory_format}
  4250. """.format(
  4251. **common_args
  4252. ),
  4253. )
  4254. add_docstr_all(
  4255. "short",
  4256. r"""
  4257. short(memory_format=torch.preserve_format) -> Tensor
  4258. ``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
  4259. Args:
  4260. {memory_format}
  4261. """.format(
  4262. **common_args
  4263. ),
  4264. )
  4265. add_docstr_all(
  4266. "take",
  4267. r"""
  4268. take(indices) -> Tensor
  4269. See :func:`torch.take`
  4270. """,
  4271. )
  4272. add_docstr_all(
  4273. "take_along_dim",
  4274. r"""
  4275. take_along_dim(indices, dim) -> Tensor
  4276. See :func:`torch.take_along_dim`
  4277. """,
  4278. )
  4279. add_docstr_all(
  4280. "tan",
  4281. r"""
  4282. tan() -> Tensor
  4283. See :func:`torch.tan`
  4284. """,
  4285. )
  4286. add_docstr_all(
  4287. "tan_",
  4288. r"""
  4289. tan_() -> Tensor
  4290. In-place version of :meth:`~Tensor.tan`
  4291. """,
  4292. )
  4293. add_docstr_all(
  4294. "tanh",
  4295. r"""
  4296. tanh() -> Tensor
  4297. See :func:`torch.tanh`
  4298. """,
  4299. )
  4300. add_docstr_all(
  4301. "softmax",
  4302. r"""
  4303. softmax(dim) -> Tensor
  4304. Alias for :func:`torch.nn.functional.softmax`.
  4305. """,
  4306. )
  4307. add_docstr_all(
  4308. "tanh_",
  4309. r"""
  4310. tanh_() -> Tensor
  4311. In-place version of :meth:`~Tensor.tanh`
  4312. """,
  4313. )
  4314. add_docstr_all(
  4315. "tolist",
  4316. r"""
  4317. tolist() -> list or number
  4318. Returns the tensor as a (nested) list. For scalars, a standard
  4319. Python number is returned, just like with :meth:`~Tensor.item`.
  4320. Tensors are automatically moved to the CPU first if necessary.
  4321. This operation is not differentiable.
  4322. Examples::
  4323. >>> a = torch.randn(2, 2)
  4324. >>> a.tolist()
  4325. [[0.012766935862600803, 0.5415473580360413],
  4326. [-0.08909505605697632, 0.7729271650314331]]
  4327. >>> a[0,0].tolist()
  4328. 0.012766935862600803
  4329. """,
  4330. )
  4331. add_docstr_all(
  4332. "topk",
  4333. r"""
  4334. topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
  4335. See :func:`torch.topk`
  4336. """,
  4337. )
  4338. add_docstr_all(
  4339. "to_dense",
  4340. r"""
  4341. to_dense(dtype=None, *, masked_grad=True) -> Tensor
  4342. Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
  4343. Keyword args:
  4344. {dtype}
  4345. masked_grad (bool, optional): If set to ``True`` (default) and
  4346. :attr:`self` has a sparse layout then the backward of
  4347. :meth:`to_dense` returns ``grad.sparse_mask(self)``.
  4348. Example::
  4349. >>> s = torch.sparse_coo_tensor(
  4350. ... torch.tensor([[1, 1],
  4351. ... [0, 2]]),
  4352. ... torch.tensor([9, 10]),
  4353. ... size=(3, 3))
  4354. >>> s.to_dense()
  4355. tensor([[ 0, 0, 0],
  4356. [ 9, 0, 10],
  4357. [ 0, 0, 0]])
  4358. """,
  4359. )
  4360. add_docstr_all(
  4361. "to_sparse",
  4362. r"""
  4363. to_sparse(sparseDims) -> Tensor
  4364. Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
  4365. :ref:`coordinate format <sparse-coo-docs>`.
  4366. Args:
  4367. sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
  4368. Example::
  4369. >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
  4370. >>> d
  4371. tensor([[ 0, 0, 0],
  4372. [ 9, 0, 10],
  4373. [ 0, 0, 0]])
  4374. >>> d.to_sparse()
  4375. tensor(indices=tensor([[1, 1],
  4376. [0, 2]]),
  4377. values=tensor([ 9, 10]),
  4378. size=(3, 3), nnz=2, layout=torch.sparse_coo)
  4379. >>> d.to_sparse(1)
  4380. tensor(indices=tensor([[1]]),
  4381. values=tensor([[ 9, 0, 10]]),
  4382. size=(3, 3), nnz=1, layout=torch.sparse_coo)
  4383. .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
  4384. :noindex:
  4385. Returns a sparse tensor with the specified layout and blocksize. If
  4386. the :attr:`self` is strided, the number of dense dimensions could be
  4387. specified, and a hybrid sparse tensor will be created, with
  4388. `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
  4389. dimension.
  4390. .. note:: If the :attr:`self` layout and blocksize parameters match
  4391. with the specified layout and blocksize, return
  4392. :attr:`self`. Otherwise, return a sparse tensor copy of
  4393. :attr:`self`.
  4394. Args:
  4395. layout (:class:`torch.layout`, optional): The desired sparse
  4396. layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
  4397. ``torch.sparse_csc``, ``torch.sparse_bsr``, or
  4398. ``torch.sparse_bsc``. Default: if ``None``,
  4399. ``torch.sparse_coo``.
  4400. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  4401. of the resulting BSR or BSC tensor. For other layouts,
  4402. specifying the block size that is not ``None`` will result in a
  4403. RuntimeError exception. A block size must be a tuple of length
  4404. two such that its items evenly divide the two sparse dimensions.
  4405. dense_dim (int, optional): Number of dense dimensions of the
  4406. resulting CSR, CSC, BSR or BSC tensor. This argument should be
  4407. used only if :attr:`self` is a strided tensor, and must be a
  4408. value between 0 and dimension of :attr:`self` tensor minus two.
  4409. Example::
  4410. >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
  4411. >>> x.to_sparse(layout=torch.sparse_coo)
  4412. tensor(indices=tensor([[0, 2, 2],
  4413. [0, 0, 1]]),
  4414. values=tensor([1, 2, 3]),
  4415. size=(3, 2), nnz=3, layout=torch.sparse_coo)
  4416. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
  4417. tensor(crow_indices=tensor([0, 1, 1, 2]),
  4418. col_indices=tensor([0, 0]),
  4419. values=tensor([[[1, 0]],
  4420. [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
  4421. >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
  4422. RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
  4423. >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
  4424. RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize
  4425. >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
  4426. >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
  4427. tensor(crow_indices=tensor([0, 1, 1, 3]),
  4428. col_indices=tensor([0, 0, 1]),
  4429. values=tensor([[1],
  4430. [2],
  4431. [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)
  4432. """,
  4433. )
  4434. add_docstr_all(
  4435. "to_sparse_csr",
  4436. r"""
  4437. to_sparse_csr(dense_dim=None) -> Tensor
  4438. Convert a tensor to compressed row storage format (CSR). Except for
  4439. strided tensors, only works with 2D tensors. If the :attr:`self` is
  4440. strided, then the number of dense dimensions could be specified, and a
  4441. hybrid CSR tensor will be created, with `dense_dim` dense dimensions
  4442. and `self.dim() - 2 - dense_dim` batch dimension.
  4443. Args:
  4444. dense_dim (int, optional): Number of dense dimensions of the
  4445. resulting CSR tensor. This argument should be used only if
  4446. :attr:`self` is a strided tensor, and must be a value between 0
  4447. and dimension of :attr:`self` tensor minus two.
  4448. Example::
  4449. >>> dense = torch.randn(5, 5)
  4450. >>> sparse = dense.to_sparse_csr()
  4451. >>> sparse._nnz()
  4452. 25
  4453. >>> dense = torch.zeros(3, 3, 1, 1)
  4454. >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
  4455. >>> dense.to_sparse_csr(dense_dim=2)
  4456. tensor(crow_indices=tensor([0, 1, 2, 3]),
  4457. col_indices=tensor([0, 2, 1]),
  4458. values=tensor([[[1.]],
  4459. [[1.]],
  4460. [[1.]]]), size=(3, 3, 1, 1), nnz=3,
  4461. layout=torch.sparse_csr)
  4462. """,
  4463. )
  4464. add_docstr_all(
  4465. "to_sparse_csc",
  4466. r"""
  4467. to_sparse_csc() -> Tensor
  4468. Convert a tensor to compressed column storage (CSC) format. Except
  4469. for strided tensors, only works with 2D tensors. If the :attr:`self`
  4470. is strided, then the number of dense dimensions could be specified,
  4471. and a hybrid CSC tensor will be created, with `dense_dim` dense
  4472. dimensions and `self.dim() - 2 - dense_dim` batch dimension.
  4473. Args:
  4474. dense_dim (int, optional): Number of dense dimensions of the
  4475. resulting CSC tensor. This argument should be used only if
  4476. :attr:`self` is a strided tensor, and must be a value between 0
  4477. and dimension of :attr:`self` tensor minus two.
  4478. Example::
  4479. >>> dense = torch.randn(5, 5)
  4480. >>> sparse = dense.to_sparse_csc()
  4481. >>> sparse._nnz()
  4482. 25
  4483. >>> dense = torch.zeros(3, 3, 1, 1)
  4484. >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
  4485. >>> dense.to_sparse_csc(dense_dim=2)
  4486. tensor(ccol_indices=tensor([0, 1, 2, 3]),
  4487. row_indices=tensor([0, 2, 1]),
  4488. values=tensor([[[1.]],
  4489. [[1.]],
  4490. [[1.]]]), size=(3, 3, 1, 1), nnz=3,
  4491. layout=torch.sparse_csc)
  4492. """,
  4493. )
  4494. add_docstr_all(
  4495. "to_sparse_bsr",
  4496. r"""
  4497. to_sparse_bsr(blocksize, dense_dim) -> Tensor
  4498. Convert a tensor to a block sparse row (BSR) storage format of given
  4499. blocksize. If the :attr:`self` is strided, then the number of dense
  4500. dimensions could be specified, and a hybrid BSR tensor will be
  4501. created, with `dense_dim` dense dimensions and `self.dim() - 2 -
  4502. dense_dim` batch dimension.
  4503. Args:
  4504. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  4505. of the resulting BSR tensor. A block size must be a tuple of
  4506. length two such that its items evenly divide the two sparse
  4507. dimensions.
  4508. dense_dim (int, optional): Number of dense dimensions of the
  4509. resulting BSR tensor. This argument should be used only if
  4510. :attr:`self` is a strided tensor, and must be a value between 0
  4511. and dimension of :attr:`self` tensor minus two.
  4512. Example::
  4513. >>> dense = torch.randn(10, 10)
  4514. >>> sparse = dense.to_sparse_csr()
  4515. >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
  4516. >>> sparse_bsr.col_indices()
  4517. tensor([0, 1, 0, 1])
  4518. >>> dense = torch.zeros(4, 3, 1)
  4519. >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
  4520. >>> dense.to_sparse_bsr((2, 1), 1)
  4521. tensor(crow_indices=tensor([0, 2, 3]),
  4522. col_indices=tensor([0, 2, 1]),
  4523. values=tensor([[[[1.]],
  4524. [[1.]]],
  4525. [[[1.]],
  4526. [[1.]]],
  4527. [[[1.]],
  4528. [[1.]]]]), size=(4, 3, 1), nnz=3,
  4529. layout=torch.sparse_bsr)
  4530. """,
  4531. )
  4532. add_docstr_all(
  4533. "to_sparse_bsc",
  4534. r"""
  4535. to_sparse_bsc(blocksize, dense_dim) -> Tensor
  4536. Convert a tensor to a block sparse column (BSC) storage format of
  4537. given blocksize. If the :attr:`self` is strided, then the number of
  4538. dense dimensions could be specified, and a hybrid BSC tensor will be
  4539. created, with `dense_dim` dense dimensions and `self.dim() - 2 -
  4540. dense_dim` batch dimension.
  4541. Args:
  4542. blocksize (list, tuple, :class:`torch.Size`, optional): Block size
  4543. of the resulting BSC tensor. A block size must be a tuple of
  4544. length two such that its items evenly divide the two sparse
  4545. dimensions.
  4546. dense_dim (int, optional): Number of dense dimensions of the
  4547. resulting BSC tensor. This argument should be used only if
  4548. :attr:`self` is a strided tensor, and must be a value between 0
  4549. and dimension of :attr:`self` tensor minus two.
  4550. Example::
  4551. >>> dense = torch.randn(10, 10)
  4552. >>> sparse = dense.to_sparse_csr()
  4553. >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
  4554. >>> sparse_bsc.row_indices()
  4555. tensor([0, 1, 0, 1])
  4556. >>> dense = torch.zeros(4, 3, 1)
  4557. >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
  4558. >>> dense.to_sparse_bsc((2, 1), 1)
  4559. tensor(ccol_indices=tensor([0, 1, 2, 3]),
  4560. row_indices=tensor([0, 1, 0]),
  4561. values=tensor([[[[1.]],
  4562. [[1.]]],
  4563. [[[1.]],
  4564. [[1.]]],
  4565. [[[1.]],
  4566. [[1.]]]]), size=(4, 3, 1), nnz=3,
  4567. layout=torch.sparse_bsc)
  4568. """,
  4569. )
  4570. add_docstr_all(
  4571. "to_mkldnn",
  4572. r"""
  4573. to_mkldnn() -> Tensor
  4574. Returns a copy of the tensor in ``torch.mkldnn`` layout.
  4575. """,
  4576. )
  4577. add_docstr_all(
  4578. "trace",
  4579. r"""
  4580. trace() -> Tensor
  4581. See :func:`torch.trace`
  4582. """,
  4583. )
  4584. add_docstr_all(
  4585. "transpose",
  4586. r"""
  4587. transpose(dim0, dim1) -> Tensor
  4588. See :func:`torch.transpose`
  4589. """,
  4590. )
  4591. add_docstr_all(
  4592. "transpose_",
  4593. r"""
  4594. transpose_(dim0, dim1) -> Tensor
  4595. In-place version of :meth:`~Tensor.transpose`
  4596. """,
  4597. )
  4598. add_docstr_all(
  4599. "triangular_solve",
  4600. r"""
  4601. triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
  4602. See :func:`torch.triangular_solve`
  4603. """,
  4604. )
  4605. add_docstr_all(
  4606. "tril",
  4607. r"""
  4608. tril(diagonal=0) -> Tensor
  4609. See :func:`torch.tril`
  4610. """,
  4611. )
  4612. add_docstr_all(
  4613. "tril_",
  4614. r"""
  4615. tril_(diagonal=0) -> Tensor
  4616. In-place version of :meth:`~Tensor.tril`
  4617. """,
  4618. )
  4619. add_docstr_all(
  4620. "triu",
  4621. r"""
  4622. triu(diagonal=0) -> Tensor
  4623. See :func:`torch.triu`
  4624. """,
  4625. )
  4626. add_docstr_all(
  4627. "triu_",
  4628. r"""
  4629. triu_(diagonal=0) -> Tensor
  4630. In-place version of :meth:`~Tensor.triu`
  4631. """,
  4632. )
add_docstr_all(
    "true_divide",
    r"""
true_divide(value) -> Tensor

See :func:`torch.true_divide`
""",
)

add_docstr_all(
    "true_divide_",
    r"""
true_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.true_divide`
""",
)

add_docstr_all(
    "trunc",
    r"""
trunc() -> Tensor

See :func:`torch.trunc`
""",
)

add_docstr_all(
    "fix",
    r"""
fix() -> Tensor

See :func:`torch.fix`.
""",
)

add_docstr_all(
    "trunc_",
    r"""
trunc_() -> Tensor

In-place version of :meth:`~Tensor.trunc`
""",
)

add_docstr_all(
    "fix_",
    r"""
fix_() -> Tensor

In-place version of :meth:`~Tensor.fix`
""",
)

add_docstr_all(
    "type",
    r"""
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor

Returns the type if `dtype` is not provided, else casts this object to
the specified type.

If this is already of the correct type, no copy is performed and the
original object is returned.

Args:
    dtype (dtype or string): The desired type
    non_blocking (bool): If ``True``, and the source is in pinned memory
        and destination is on the GPU or vice versa, the copy is performed
        asynchronously with respect to the host. Otherwise, the argument
        has no effect.
    **kwargs: For compatibility, may contain the key ``async`` in place of
        the ``non_blocking`` argument. The ``async`` arg is deprecated.
""",
)

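# Illustrative sketch (editor's addition, not part of the upstream docstring):
# with no ``dtype``, ``type`` reports the legacy type string; with a dtype, it
# casts.
#
#     >>> t = torch.ones(2)
#     >>> t.type()
#     'torch.FloatTensor'
#     >>> t.type(torch.int64)
#     tensor([1, 1])
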
add_docstr_all(
    "type_as",
    r"""
type_as(tensor) -> Tensor

Returns this tensor cast to the type of the given tensor.

This is a no-op if the tensor is already of the correct type. This is
equivalent to ``self.type(tensor.type())``.

Args:
    tensor (Tensor): the tensor which has the desired type
""",
)

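# Illustrative sketch (editor's addition): ``b.type_as(a)`` casts ``b`` to the
# type of ``a``.
#
#     >>> a = torch.zeros(3, dtype=torch.float64)
#     >>> b = torch.tensor([1, 2, 3])  # int64 by default
#     >>> b.type_as(a).dtype
#     torch.float64
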
add_docstr_all(
    "unfold",
    r"""
unfold(dimension, size, step) -> Tensor

Returns a view of the original tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dimension`.

Step between two slices is given by :attr:`step`.

If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) / step + 1`.

An additional dimension of size :attr:`size` is appended in the returned tensor.

Args:
    dimension (int): dimension in which unfolding happens
    size (int): the size of each slice that is unfolded
    step (int): the step between each slice

Example::

    >>> x = torch.arange(1., 8)
    >>> x
    tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.])
    >>> x.unfold(0, 2, 1)
    tensor([[ 1.,  2.],
            [ 2.,  3.],
            [ 3.,  4.],
            [ 4.,  5.],
            [ 5.,  6.],
            [ 6.,  7.]])
    >>> x.unfold(0, 2, 2)
    tensor([[ 1.,  2.],
            [ 3.,  4.],
            [ 5.,  6.]])
""",
)

add_docstr_all(
    "uniform_",
    r"""
uniform_(from=0, to=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:

.. math::
    f(x) = \dfrac{1}{\text{to} - \text{from}}
""",
)

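# Illustrative sketch (editor's addition): samples land in the half-open
# interval [from, to).
#
#     >>> t = torch.empty(3).uniform_(2, 4)
#     >>> bool(((t >= 2) & (t < 4)).all())
#     True
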
add_docstr_all(
    "unsqueeze",
    r"""
unsqueeze(dim) -> Tensor

See :func:`torch.unsqueeze`
""",
)

add_docstr_all(
    "unsqueeze_",
    r"""
unsqueeze_(dim) -> Tensor

In-place version of :meth:`~Tensor.unsqueeze`
""",
)

add_docstr_all(
    "var",
    r"""
var(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.var`
""",
)

add_docstr_all(
    "vdot",
    r"""
vdot(other) -> Tensor

See :func:`torch.vdot`
""",
)

add_docstr_all(
    "view",
    r"""
view(*shape) -> Tensor

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.

The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,

.. math::

    \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]

Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.

Args:
    shape (torch.Size or int...): the desired size

Example::

    >>> x = torch.randn(4, 4)
    >>> x.size()
    torch.Size([4, 4])
    >>> y = x.view(16)
    >>> y.size()
    torch.Size([16])
    >>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
    >>> z.size()
    torch.Size([2, 8])

    >>> a = torch.randn(1, 2, 3, 4)
    >>> a.size()
    torch.Size([1, 2, 3, 4])
    >>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
    >>> b.size()
    torch.Size([1, 3, 2, 4])
    >>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
    >>> c.size()
    torch.Size([1, 3, 2, 4])
    >>> torch.equal(b, c)
    False

.. method:: view(dtype) -> Tensor
   :noindex:

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`dtype`.

If the element size of :attr:`dtype` differs from that of ``self.dtype``,
then the size of the last dimension of the output will be scaled
proportionally. For instance, if :attr:`dtype` element size is twice that of
``self.dtype``, then each pair of elements in the last dimension of
:attr:`self` will be combined, and the size of the last dimension of the output
will be half that of :attr:`self`. If :attr:`dtype` element size is half that
of ``self.dtype``, then each element in the last dimension of :attr:`self` will
be split in two, and the size of the last dimension of the output will be
double that of :attr:`self`. For this to be possible, the following conditions
must be true:

    * ``self.dim()`` must be greater than 0.
    * ``self.stride(-1)`` must be 1.

Additionally, if the element size of :attr:`dtype` is greater than that of
``self.dtype``, the following conditions must be true as well:

    * ``self.size(-1)`` must be divisible by the ratio between the element
      sizes of the dtypes.
    * ``self.storage_offset()`` must be divisible by the ratio between the
      element sizes of the dtypes.
    * The strides of all dimensions, except the last dimension, must be
      divisible by the ratio between the element sizes of the dtypes.

If any of the above conditions are not met, an error is thrown.

.. warning::

    This overload is not supported by TorchScript, and using it in a TorchScript
    program will cause undefined behavior.

Args:
    dtype (:class:`torch.dtype`): the desired dtype

Example::

    >>> x = torch.randn(4, 4)
    >>> x
    tensor([[ 0.9482, -0.0310,  1.4999, -0.5316],
            [-0.1520,  0.7472,  0.5617, -0.8649],
            [-2.4724, -0.0334, -0.2976, -0.8499],
            [-0.2109,  1.9913, -0.9607, -0.6123]])
    >>> x.dtype
    torch.float32

    >>> y = x.view(torch.int32)
    >>> y
    tensor([[ 1064483442, -1124191867,  1069546515, -1089989247],
            [-1105482831,  1061112040,  1057999968, -1084397505],
            [-1071760287, -1123489973, -1097310419, -1084649136],
            [-1101533110,  1073668768, -1082790149, -1088634448]],
        dtype=torch.int32)
    >>> y[0, 0] = 1000000000
    >>> x
    tensor([[ 0.0047, -0.0310,  1.4999, -0.5316],
            [-0.1520,  0.7472,  0.5617, -0.8649],
            [-2.4724, -0.0334, -0.2976, -0.8499],
            [-0.2109,  1.9913, -0.9607, -0.6123]])

    >>> x.view(torch.cfloat)
    tensor([[ 0.0047-0.0310j,  1.4999-0.5316j],
            [-0.1520+0.7472j,  0.5617-0.8649j],
            [-2.4724-0.0334j, -0.2976-0.8499j],
            [-0.2109+1.9913j, -0.9607-0.6123j]])
    >>> x.view(torch.cfloat).size()
    torch.Size([4, 2])

    >>> x.view(torch.uint8)
    tensor([[  0, 202, 154,  59, 182, 243, 253, 188, 185, 252, 191,  63, 240,  22,
               8, 191],
            [227, 165,  27, 190, 128,  72,  63,  63, 146, 203,  15,  63,  22, 106,
              93, 191],
            [205,  59,  30, 192, 112, 206,   8, 189,   7,  95, 152, 190,  12, 147,
              89, 191],
            [ 43, 246,  87, 190, 235, 226, 254,  63, 111, 240, 117, 191, 177, 191,
              28, 191]], dtype=torch.uint8)
    >>> x.view(torch.uint8).size()
    torch.Size([4, 16])
""",
)

add_docstr_all(
    "view_as",
    r"""
view_as(other) -> Tensor

View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.

Please see :meth:`~Tensor.view` for more information about ``view``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same size
        as :attr:`other`.
""",
)

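# Illustrative sketch (editor's addition): only the size of ``y`` is used.
#
#     >>> x = torch.arange(6)
#     >>> y = torch.empty(2, 3)
#     >>> x.view_as(y).size()
#     torch.Size([2, 3])
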
add_docstr_all(
    "expand",
    r"""
expand(*sizes) -> Tensor

Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.

Passing -1 as the size for a dimension means not changing the size of
that dimension.

Tensor can also be expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.

Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.

Args:
    *sizes (torch.Size or int...): the desired expanded size

.. warning::

    More than one element of an expanded tensor may refer to a single
    memory location. As a result, in-place operations (especially ones that
    are vectorized) may result in incorrect behavior. If you need to write
    to the tensors, please clone them first.

Example::

    >>> x = torch.tensor([[1], [2], [3]])
    >>> x.size()
    torch.Size([3, 1])
    >>> x.expand(3, 4)
    tensor([[ 1,  1,  1,  1],
            [ 2,  2,  2,  2],
            [ 3,  3,  3,  3]])
    >>> x.expand(-1, 4)  # -1 means not changing the size of that dimension
    tensor([[ 1,  1,  1,  1],
            [ 2,  2,  2,  2],
            [ 3,  3,  3,  3]])
""",
)

add_docstr_all(
    "expand_as",
    r"""
expand_as(other) -> Tensor

Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.

Please see :meth:`~Tensor.expand` for more information about ``expand``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same size
        as :attr:`other`.
""",
)

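# Illustrative sketch (editor's addition): a column vector expanded to the
# shape of ``other`` without copying data.
#
#     >>> col = torch.tensor([[1], [2]])
#     >>> other = torch.zeros(2, 3)
#     >>> col.expand_as(other)
#     tensor([[1, 1, 1],
#             [2, 2, 2]])
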
add_docstr_all(
    "sum_to_size",
    r"""
sum_to_size(*size) -> Tensor

Sum ``this`` tensor to :attr:`size`.
:attr:`size` must be broadcastable to ``this`` tensor size.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
""",
)

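# Illustrative sketch (editor's addition): summing back over the dimension
# that broadcasting would have expanded.
#
#     >>> x = torch.ones(2, 3)
#     >>> x.sum_to_size(1, 3)
#     tensor([[2., 2., 2.]])
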
add_docstr_all(
    "zero_",
    r"""
zero_() -> Tensor

Fills :attr:`self` tensor with zeros.
""",
)

add_docstr_all(
    "matmul",
    r"""
matmul(tensor2) -> Tensor

See :func:`torch.matmul`
""",
)

add_docstr_all(
    "chunk",
    r"""
chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.chunk`
""",
)

add_docstr_all(
    "unsafe_chunk",
    r"""
unsafe_chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.unsafe_chunk`
""",
)

add_docstr_all(
    "unsafe_split",
    r"""
unsafe_split(split_size, dim=0) -> List of Tensors

See :func:`torch.unsafe_split`
""",
)

add_docstr_all(
    "tensor_split",
    r"""
tensor_split(indices_or_sections, dim=0) -> List of Tensors

See :func:`torch.tensor_split`
""",
)

add_docstr_all(
    "hsplit",
    r"""
hsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.hsplit`
""",
)

add_docstr_all(
    "vsplit",
    r"""
vsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.vsplit`
""",
)

add_docstr_all(
    "dsplit",
    r"""
dsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.dsplit`
""",
)

add_docstr_all(
    "stft",
    r"""
stft(n_fft, hop_length=None, win_length=None, window=None, center=True,
     pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor

See :func:`torch.stft`
""",
)

add_docstr_all(
    "istft",
    r"""
istft(n_fft, hop_length=None, win_length=None, window=None, center=True,
      normalized=False, onesided=True, length=None) -> Tensor

See :func:`torch.istft`
""",
)

add_docstr_all(
    "det",
    r"""
det() -> Tensor

See :func:`torch.det`
""",
)

add_docstr_all(
    "where",
    r"""
where(condition, y) -> Tensor

``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
""",
)

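# Illustrative sketch (editor's addition): elements of ``self`` are kept where
# the condition holds; ``y`` is used elsewhere.
#
#     >>> x = torch.tensor([1., -1., 2.])
#     >>> x.where(x > 0, torch.zeros_like(x))
#     tensor([1., 0., 2.])
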
add_docstr_all(
    "logdet",
    r"""
logdet() -> Tensor

See :func:`torch.logdet`
""",
)

add_docstr_all(
    "slogdet",
    r"""
slogdet() -> (Tensor, Tensor)

See :func:`torch.slogdet`
""",
)

add_docstr_all(
    "unbind",
    r"""
unbind(dim=0) -> seq

See :func:`torch.unbind`
""",
)

add_docstr_all(
    "pin_memory",
    r"""
pin_memory() -> Tensor

Copies the tensor to pinned memory, if it's not already pinned.
""",
)

add_docstr_all(
    "pinverse",
    r"""
pinverse() -> Tensor

See :func:`torch.pinverse`
""",
)

add_docstr_all(
    "index_add",
    r"""
index_add(dim, index, source, *, alpha=1) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_add_`.
""",
)

add_docstr_all(
    "index_copy",
    r"""
index_copy(dim, index, tensor2) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_copy_`.
""",
)

add_docstr_all(
    "index_fill",
    r"""
index_fill(dim, index, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_fill_`.
""",
)

add_docstr_all(
    "scatter",
    r"""
scatter(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_`
""",
)

add_docstr_all(
    "scatter_add",
    r"""
scatter_add(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_add_`
""",
)

add_docstr_all(
    "scatter_reduce",
    r"""
scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
""",
)

add_docstr_all(
    "masked_scatter",
    r"""
masked_scatter(mask, tensor) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_scatter_`

.. note::

    The inputs :attr:`self` and :attr:`mask`
    :ref:`broadcast <broadcasting-semantics>`.

Example::

    >>> self = torch.tensor([0, 0, 0, 0, 0])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])
""",
)

add_docstr_all(
    "xlogy",
    r"""
xlogy(other) -> Tensor

See :func:`torch.xlogy`
""",
)

add_docstr_all(
    "xlogy_",
    r"""
xlogy_(other) -> Tensor

In-place version of :meth:`~Tensor.xlogy`
""",
)

add_docstr_all(
    "masked_fill",
    r"""
masked_fill(mask, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_fill_`
""",
)

add_docstr_all(
    "grad",
    r"""
This attribute is ``None`` by default and becomes a Tensor the first time a call to
:func:`backward` computes gradients for ``self``.
The attribute will then contain the gradients computed and future calls to
:func:`backward` will accumulate (add) gradients into it.
""",
)

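# Illustrative sketch (editor's addition): repeated backward calls accumulate
# into ``.grad``.
#
#     >>> w = torch.tensor(2.0, requires_grad=True)
#     >>> (w * 3).backward()
#     >>> w.grad
#     tensor(3.)
#     >>> (w * 3).backward()
#     >>> w.grad
#     tensor(6.)
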
add_docstr_all(
    "retain_grad",
    r"""
retain_grad() -> None

Enables this Tensor to have its :attr:`grad` populated during
:func:`backward`. This is a no-op for leaf tensors.
""",
)

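# Illustrative sketch (editor's addition): opting a non-leaf tensor into
# having its ``.grad`` kept.
#
#     >>> x = torch.tensor(1.0, requires_grad=True)
#     >>> y = x * 2  # non-leaf
#     >>> y.retain_grad()
#     >>> (y * 3).backward()
#     >>> y.grad
#     tensor(3.)
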
add_docstr_all(
    "retains_grad",
    r"""
Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
populated during :func:`backward`, ``False`` otherwise.
""",
)

add_docstr_all(
    "requires_grad",
    r"""
Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.

.. note::

    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
    attribute will be populated, see :attr:`is_leaf` for more details.
""",
)

add_docstr_all(
    "is_leaf",
    r"""
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.

For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.

Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.

Example::

    >>> a = torch.rand(10, requires_grad=True)
    >>> a.is_leaf
    True
    >>> b = torch.rand(10, requires_grad=True).cuda()
    >>> b.is_leaf
    False
    # b was created by the operation that cast a cpu Tensor into a cuda Tensor
    >>> c = torch.rand(10, requires_grad=True) + 2
    >>> c.is_leaf
    False
    # c was created by the addition operation
    >>> d = torch.rand(10).cuda()
    >>> d.is_leaf
    True
    # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
    >>> e = torch.rand(10).cuda().requires_grad_()
    >>> e.is_leaf
    True
    # e requires gradients and has no operations creating it
    >>> f = torch.rand(10, requires_grad=True, device="cuda")
    >>> f.is_leaf
    True
    # f requires grad, has no operation creating it
""",
)

add_docstr_all(
    "names",
    r"""
Stores names for each of this tensor's dimensions.

``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.

Dimension names may contain characters or underscore. Furthermore, a dimension
name must be a valid Python variable name (i.e., does not start with underscore).

Tensors may not have two named dimensions with the same name.

.. warning::

    The named tensor API is experimental and subject to change.
""",
)

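# Illustrative sketch (editor's addition, experimental API):
#
#     >>> t = torch.zeros(2, 3, names=('N', 'C'))
#     >>> t.names
#     ('N', 'C')
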
add_docstr_all(
    "is_cuda",
    r"""
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_cpu",
    r"""
Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_xla",
    r"""
Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_ipu",
    r"""
Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_xpu",
    r"""
Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_quantized",
    r"""
Is ``True`` if the Tensor is quantized, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_meta",
    r"""
Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
are like normal tensors, but they carry no data.
""",
)

add_docstr_all(
    "is_mps",
    r"""
Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_sparse",
    r"""
Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.
""",
)

add_docstr_all(
    "is_sparse_csr",
    r"""
Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
""",
)

add_docstr_all(
    "device",
    r"""
Is the :class:`torch.device` where this Tensor is.
""",
)

add_docstr_all(
    "ndim",
    r"""
Alias for :meth:`~Tensor.dim()`
""",
)

add_docstr_all(
    "itemsize",
    r"""
Alias for :meth:`~Tensor.element_size()`
""",
)

add_docstr_all(
    "nbytes",
    r"""
Returns the number of bytes consumed by the "view" of elements of the Tensor
if the Tensor does not use sparse storage layout.
Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()`
""",
)

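# Illustrative sketch (editor's addition): ``nbytes`` equals
# ``numel() * element_size()``.
#
#     >>> t = torch.zeros(4, 5)  # float32, 4 bytes per element
#     >>> t.nbytes
#     80
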
add_docstr_all(
    "T",
    r"""
Returns a view of this tensor with its dimensions reversed.

If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.

.. warning::

    The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
    is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT`
    to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
    the dimensions of a tensor.
""",
)

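# Illustrative sketch (editor's addition): for a 2-D tensor, ``.T`` is the
# ordinary transpose.
#
#     >>> x = torch.arange(6).reshape(2, 3)
#     >>> x.T.size()
#     torch.Size([3, 2])
#     >>> torch.equal(x.T, x.permute(1, 0))
#     True
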
add_docstr_all(
    "H",
    r"""
Returns a view of a matrix (2-D tensor) conjugated and transposed.

``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
``x.transpose(0, 1)`` for real matrices.

.. seealso::

    :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
""",
)

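# Illustrative sketch (editor's addition): conjugate transpose of a complex
# matrix.
#
#     >>> m = torch.tensor([[1 + 2j, 3 - 4j]])
#     >>> m.H
#     tensor([[1.-2.j],
#             [3.+4.j]])
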
add_docstr_all(
    "mT",
    r"""
Returns a view of this tensor with the last two dimensions transposed.

``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
""",
)

add_docstr_all(
    "mH",
    r"""
Accessing this property is equivalent to calling :func:`adjoint`.
""",
)

add_docstr_all(
    "adjoint",
    r"""
adjoint() -> Tensor

Alias for :func:`torch.adjoint`
""",
)

add_docstr_all(
    "real",
    r"""
Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
The returned tensor and :attr:`self` share the same underlying storage.

Returns :attr:`self` if :attr:`self` is a real-valued tensor.

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.real
    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""",
)

add_docstr_all(
    "imag",
    r"""
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

.. warning::

    :func:`imag` is only supported for tensors with complex dtypes.

Example::

    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.imag
    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""",
)

add_docstr_all(
    "as_subclass",
    r"""
as_subclass(cls) -> Tensor

Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
""",
)

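# Illustrative sketch (editor's addition; ``MyTensor`` is a hypothetical
# subclass): the result shares storage with the original tensor.
#
#     >>> class MyTensor(torch.Tensor):
#     ...     pass
#     >>> t = torch.ones(2)
#     >>> m = t.as_subclass(MyTensor)
#     >>> type(m).__name__
#     'MyTensor'
#     >>> _ = m.mul_(2)
#     >>> t  # changes through the subclass view are visible here
#     tensor([2., 2.])
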
add_docstr_all(
    "crow_indices",
    r"""
crow_indices() -> IntTensor

Returns the tensor containing the compressed row indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

Example::

    >>> csr = torch.eye(5, 5).to_sparse_csr()
    >>> csr.crow_indices()
    tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
""",
)

add_docstr_all(
    "col_indices",
    r"""
col_indices() -> IntTensor

Returns the tensor containing the column indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

Example::

    >>> csr = torch.eye(5, 5).to_sparse_csr()
    >>> csr.col_indices()
    tensor([0, 1, 2, 3, 4], dtype=torch.int32)
""",
)

add_docstr_all(
    "to_padded_tensor",
    r"""
to_padded_tensor(padding, output_size=None) -> Tensor

See :func:`to_padded_tensor`
""",
)