# mypy: allow-untyped-defs
"""Adds docstrings to functions defined in the torch._C module."""

import re

import torch._C
from torch._C import _add_docstr as add_docstr


def parse_kwargs(desc):
    r"""Map a description of args to a dictionary of {argname: description}.

    Input:
        ('    weight (Tensor): a weight tensor\n' +
         '        Some optional description')
    Output: {
        'weight': \
        'weight (Tensor): a weight tensor\n        Some optional description'
    }
    """
    # Split on exactly 4 spaces after a newline
    regx = re.compile(r"\n\s{4}(?!\s)")
    kwargs = [section.strip() for section in regx.split(desc)]
    kwargs = [section for section in kwargs if len(section) > 0]
    return {desc.split(" ")[0]: desc for desc in kwargs}


def merge_dicts(*dicts):
    """Merge dictionaries into a single dictionary."""
    return {x: d[x] for d in dicts for x in d}
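
# A minimal sketch of how the two helpers above compose (hypothetical REPL
# session, kept in comments so nothing runs at import time): parse_kwargs
# keys each argument block by its first token, and merge_dicts lets later
# dicts override earlier ones.
#
#   >>> parse_kwargs("    a (int): first\n    b (int): second")
#   {'a': 'a (int): first', 'b': 'b (int): second'}
#   >>> merge_dicts({"a": 1}, {"a": 2, "b": 3})
#   {'a': 2, 'b': 3}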
common_args = parse_kwargs(
    """
    input (Tensor): the input tensor.
    generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
    out (Tensor, optional): the output tensor.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned tensor. Default: ``torch.preserve_format``.
"""
)

reduceops_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.
    keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""
    ),
)

multi_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int or tuple of ints): the dimension or dimensions to reduce.
"""
    ),
    {
        "keepdim_details": """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""
    },
    {
        "opt_dim": """
    dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
        If ``None``, all dimensions are reduced.
"""
    },
)
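
# Sketch of the ``keepdim`` semantics spelled out in ``keepdim_details`` above
# (hand-checked shapes, comments only):
#
#   >>> t = torch.ones(2, 3)
#   >>> t.sum(dim=1).shape                # dim squeezed away
#   torch.Size([2])
#   >>> t.sum(dim=1, keepdim=True).shape  # dim retained with size 1
#   torch.Size([2, 1])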
single_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int): the dimension to reduce.
"""
    ),
    {
        "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""
    },
)
factory_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
    check_invariants (bool, optional): If sparse tensor invariants are checked.
        Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
        initially False.
"""
    ),
    {
        "sparse_factory_device_note": """\
.. note::

   If the ``device`` argument is not specified the device of the given
   :attr:`values` and indices tensor(s) must match. If, however, the
   argument is specified the input Tensors will be converted to the
   given device and in turn determine the device of the constructed
   sparse tensor."""
    },
)
factory_like_common_args = parse_kwargs(
    """
    input (Tensor): the size of :attr:`input` will determine size of the output tensor.
    layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
        Default: if ``None``, defaults to the layout of :attr:`input`.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
        Default: if ``None``, defaults to the dtype of :attr:`input`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, defaults to the device of :attr:`input`.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)
factory_data_common_args = parse_kwargs(
    """
    data (array_like): Initial data for the tensor. Can be a list, tuple,
        NumPy ``ndarray``, scalar, and other types.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, infers data type from :attr:`data`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
"""
)
tf32_notes = {
    "tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
}

rocm_fp16_notes = {
    "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \
:ref:`different precision<fp16_on_mi200>` for backward."""
}

reproducibility_notes = {
    "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
undesirable, you can try to make the operation deterministic (potentially at \
a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
See :doc:`/notes/randomness` for more information.""",
}

sparse_support_notes = {
    "sparse_beta_warning": """
.. warning::
    Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
    or may not have autograd support. If you notice missing functionality please
    open a feature request.""",
}
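
# These note dicts are spliced into the docstrings below through str.format,
# e.g. (hypothetical):
#
#   >>> "Note: {tf32_note}".format(**tf32_notes)
#   'Note: This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.'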
add_docstr(
    torch.abs,
    r"""
abs(input, *, out=None) -> Tensor

Computes the absolute value of each element in :attr:`input`.

.. math::
    \text{out}_{i} = |\text{input}_{i}|
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.abs(torch.tensor([-1, -2, 3]))
    tensor([ 1,  2,  3])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.absolute,
    r"""
absolute(input, *, out=None) -> Tensor

Alias for :func:`torch.abs`
""",
)
add_docstr(
    torch.acos,
    r"""
acos(input, *, out=None) -> Tensor

Computes the inverse cosine of each element in :attr:`input`.

.. math::
    \text{out}_{i} = \cos^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.3348, -0.5889,  0.2005, -0.1584])
    >>> torch.acos(a)
    tensor([ 1.2294,  2.2004,  1.3690,  1.7298])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arccos,
    r"""
arccos(input, *, out=None) -> Tensor

Alias for :func:`torch.acos`.
""",
)
add_docstr(
    torch.acosh,
    r"""
acosh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \cosh^{-1}(\text{input}_{i})

Note:
    The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
    will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(1, 2)
    >>> a
    tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
    >>> torch.acosh(a)
    tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arccosh,
    r"""
arccosh(input, *, out=None) -> Tensor

Alias for :func:`torch.acosh`.
""",
)
add_docstr(
    torch.index_add,
    r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor

See :meth:`~Tensor.index_add_` for function description.
""",
)

add_docstr(
    torch.index_copy,
    r"""
index_copy(input, dim, index, source, *, out=None) -> Tensor

See :meth:`~Tensor.index_copy_` for function description.
""",
)

add_docstr(
    torch.index_reduce,
    r"""
index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor

See :meth:`~Tensor.index_reduce_` for function description.
""",
)
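
# A hand-checked sketch of what the out-of-place index_add variant computes
# (comments only; values follow from the index_add_ semantics referenced above):
#
#   >>> x = torch.ones(3, 3)
#   >>> index = torch.tensor([0, 2])
#   >>> source = torch.full((2, 3), 10.0)
#   >>> torch.index_add(x, 0, index, source)  # rows 0 and 2 gain 10.0
#   tensor([[11., 11., 11.],
#           [ 1.,  1.,  1.],
#           [11., 11., 11.]])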
add_docstr(
    torch.add,
    r"""
add(input, other, *, alpha=1, out=None) -> Tensor

Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.

.. math::
    \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i
"""
    + r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

Args:
    {input}
    other (Tensor or Number): the tensor or number to add to :attr:`input`.

Keyword arguments:
    alpha (Number): the multiplier for :attr:`other`.
    {out}

Examples::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.0202,  1.0985,  1.3506, -0.6056])
    >>> torch.add(a, 20)
    tensor([ 20.0202,  21.0985,  21.3506,  19.3944])

    >>> b = torch.randn(4)
    >>> b
    tensor([-0.9732, -0.3497,  0.6245,  0.4022])
    >>> c = torch.randn(4, 1)
    >>> c
    tensor([[ 0.3743],
            [-1.7724],
            [-0.5811],
            [-0.8017]])
    >>> torch.add(b, c, alpha=10)
    tensor([[  2.7695,   3.3930,   4.3672,   4.1450],
            [-18.6971, -18.0736, -17.0994, -17.3216],
            [ -6.7845,  -6.1610,  -5.1868,  -5.4090],
            [ -8.9902,  -8.3667,  -7.3925,  -7.6147]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addbmm,
    r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.

:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.

If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): matrix to be added
    batch1 (Tensor): the first batch of matrices to be multiplied
    batch2 (Tensor): the second batch of matrices to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.addbmm(M, batch1, batch2)
    tensor([[  6.6311,   0.0503,   6.9768, -12.0362,  -2.1653],
            [ -4.8185,  -1.4255,  -6.6760,   8.9453,   2.5743],
            [ -3.8202,   4.3691,   1.0943,  -1.1109,   5.4730]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
add_docstr(
    torch.addcdiv,
    r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.

.. warning::
    Integer division with addcdiv is no longer supported, and in a future
    release addcdiv will perform a true division of tensor1 and tensor2.
    The historic addcdiv behavior can be implemented as
    (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
    for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
    The future addcdiv behavior is just the latter implementation:
    (input + value * tensor1 / tensor2), for all dtypes.

.. math::
    \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
"""
    + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the numerator tensor
    tensor2 (Tensor): the denominator tensor

Keyword args:
    value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcdiv(t, t1, t2, value=0.1)
    tensor([[-0.2312, -3.6496,  0.1312],
            [-1.0428,  3.4292, -0.1030],
            [-0.5369, -0.9829,  0.0430]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addcmul,
    r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor

Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.

.. math::
    \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
"""
    + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the tensor to be multiplied
    tensor2 (Tensor): the tensor to be multiplied

Keyword args:
    value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcmul(t, t1, t2, value=0.1)
    tensor([[-0.8635, -0.6391,  1.6174],
            [-0.7617, -0.5879,  1.7388],
            [-0.8353, -0.6249,  1.6511]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addmm,
    r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.

If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.

:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse the result will have the same layout and if :attr:`out`
is provided it must have the same layout as :attr:`input`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): matrix to be added
    mat1 (Tensor): the first matrix to be matrix multiplied
    mat2 (Tensor): the second matrix to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(2, 3)
    >>> mat1 = torch.randn(2, 3)
    >>> mat2 = torch.randn(3, 3)
    >>> torch.addmm(M, mat1, mat2)
    tensor([[-4.8716,  1.4671, -1.3746],
            [ 0.7573, -3.9555, -2.8681]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)
add_docstr(
    torch.adjoint,
    r"""
adjoint(Tensor) -> Tensor

Returns a view of the tensor conjugated and with the last two dimensions transposed.

``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
to ``x.transpose(-2, -1)`` for real tensors.

Example::

    >>> x = torch.arange(4, dtype=torch.float)
    >>> A = torch.complex(x, x).reshape(2, 2)
    >>> A
    tensor([[0.+0.j, 1.+1.j],
            [2.+2.j, 3.+3.j]])
    >>> A.adjoint()
    tensor([[0.-0.j, 2.-2.j],
            [1.-1.j, 3.-3.j]])
    >>> (A.adjoint() == A.mH).all()
    tensor(True)
""",
)
add_docstr(
    torch.sspaddmm,
    r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor

Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.

Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.

Args:
    input (Tensor): a sparse matrix to be added
    mat1 (Tensor): a sparse matrix to be matrix multiplied
    mat2 (Tensor): a dense matrix to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}
""".format(
        **common_args
    ),
)
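
# A minimal sketch of the sspaddmm call shape (hypothetical values; shown as
# comments rather than a doctest because sparse tensor reprs are verbose):
#
#   >>> sparse_input = torch.zeros(3, 2).to_sparse()  # (n x p), sparse
#   >>> sparse_mat1 = torch.eye(3).to_sparse()        # (n x m), sparse
#   >>> dense_mat2 = torch.ones(3, 2)                 # (m x p), dense
#   >>> torch.sspaddmm(sparse_input, sparse_mat1, dense_mat2).to_dense()
#   tensor([[1., 1.],
#           [1., 1.],
#           [1., 1.]])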
add_docstr(
    torch.smm,
    r"""
smm(input, mat) -> Tensor

Performs a matrix multiplication of the sparse matrix :attr:`input`
with the dense matrix :attr:`mat`.

Args:
    input (Tensor): a sparse matrix to be matrix multiplied
    mat (Tensor): a dense matrix to be matrix multiplied
""",
)
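
# A similar hand-checked sketch for smm (comments only): a sparse identity
# times a dense matrix returns that matrix as a sparse result.
#
#   >>> sparse_eye = torch.eye(2).to_sparse()
#   >>> dense = torch.tensor([[1., 2.], [3., 4.]])
#   >>> torch.smm(sparse_eye, dense).to_dense()
#   tensor([[1., 2.],
#           [3., 4.]])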
add_docstr(
    torch.addmv,
    r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor

Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.

If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be a 1-D tensor of size `n`.

:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

Args:
    input (Tensor): vector to be added
    mat (Tensor): matrix to be matrix multiplied
    vec (Tensor): vector to be matrix multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(2)
    >>> mat = torch.randn(2, 3)
    >>> vec = torch.randn(3)
    >>> torch.addmv(M, mat, vec)
    tensor([-0.3768, -5.5565])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.addr,
    r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor

Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.

Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.

.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.

Args:
    input (Tensor): matrix to be added
    vec1 (Tensor): the first vector of the outer product
    vec2 (Tensor): the second vector of the outer product

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
    {out}

Example::

    >>> vec1 = torch.arange(1., 4.)
    >>> vec2 = torch.arange(1., 3.)
    >>> M = torch.zeros(3, 2)
    >>> torch.addr(M, vec1, vec2)
    tensor([[ 1.,  2.],
            [ 2.,  4.],
            [ 3.,  6.]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.allclose,
    r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool

This function checks if :attr:`input` and :attr:`other` satisfy the condition:

.. math::
    \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
"""
    + r"""
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_

Args:
    input (Tensor): first tensor to compare
    other (Tensor): second tensor to compare
    atol (float, optional): absolute tolerance. Default: 1e-08
    rtol (float, optional): relative tolerance. Default: 1e-05
    equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``

Example::

    >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
    False
    >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
    True
    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
    False
    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
    True
""",
)
add_docstr(
    torch.all,
    r"""
all(input) -> Tensor

Tests if all elements in :attr:`input` evaluate to `True`.

.. note:: This function matches the behaviour of NumPy in returning
          output of dtype `bool` for all supported dtypes except `uint8`.
          For `uint8` the dtype of output is `uint8` itself.

Example::

    >>> a = torch.rand(1, 2).bool()
    >>> a
    tensor([[False, True]], dtype=torch.bool)
    >>> torch.all(a)
    tensor(False, dtype=torch.bool)
    >>> a = torch.arange(0, 3)
    >>> a
    tensor([0, 1, 2])
    >>> torch.all(a)
    tensor(False)

.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
   :noindex:

For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.rand(4, 2).bool()
    >>> a
    tensor([[True, True],
            [True, False],
            [True, True],
            [True, True]], dtype=torch.bool)
    >>> torch.all(a, dim=1)
    tensor([ True, False,  True,  True], dtype=torch.bool)
    >>> torch.all(a, dim=0)
    tensor([ True, False], dtype=torch.bool)
""".format(
        **multi_dim_common
    ),
)
add_docstr(
    torch.any,
    r"""
any(input) -> Tensor

Tests if any element in :attr:`input` evaluates to `True`.

.. note:: This function matches the behaviour of NumPy in returning
          output of dtype `bool` for all supported dtypes except `uint8`.
          For `uint8` the dtype of output is `uint8` itself.

Example::

    >>> a = torch.rand(1, 2).bool()
    >>> a
    tensor([[False, True]], dtype=torch.bool)
    >>> torch.any(a)
    tensor(True, dtype=torch.bool)
    >>> a = torch.arange(0, 3)
    >>> a
    tensor([0, 1, 2])
    >>> torch.any(a)
    tensor(True)

.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
   :noindex:

For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 2) < 0
    >>> a
    tensor([[ True,  True],
            [False,  True],
            [ True,  True],
            [False, False]])
    >>> torch.any(a, 1)
    tensor([ True,  True,  True, False])
    >>> torch.any(a, 0)
    tensor([True, True])
""".format(
        **multi_dim_common
    ),
)
add_docstr(
    torch.angle,
    r"""
angle(input, *, out=None) -> Tensor

Computes the element-wise angle (in radians) of the given :attr:`input` tensor.

.. math::
    \text{out}_{i} = angle(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
          zero for non-negative real numbers, and propagates NaNs. Previously
          the function would return zero for all real numbers and not propagate
          floating-point NaNs.

Example::

    >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
    tensor([ 135.,  135,  -45])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.as_strided,
    r"""
as_strided(input, size, stride, storage_offset=None) -> Tensor

Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.

.. warning::
    Prefer using other view functions, like :meth:`torch.Tensor.expand`,
    to setting a view's strides manually with `as_strided`, as this
    function's behavior depends on the implementation of a tensor's storage.
    The constructed view of the storage must only refer to elements within
    the storage or a runtime error will be thrown, and if the view is
    "overlapped" (with multiple indices referring to the same element in
    memory) its behavior is undefined.

Args:
    {input}
    size (tuple or ints): the shape of the output tensor
    stride (tuple or ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor.
        If ``None``, the storage_offset of the output tensor will match the input tensor.

Example::

    >>> x = torch.randn(3, 3)
    >>> x
    tensor([[ 0.9039,  0.6291,  1.0795],
            [ 0.1586,  2.1939, -0.4900],
            [-0.1909, -0.7503,  1.9355]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2))
    >>> t
    tensor([[0.9039, 1.0795],
            [0.6291, 0.1586]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
    >>> t
    tensor([[0.6291, 0.1586],
            [1.0795, 2.1939]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.as_tensor,
    r"""
as_tensor(data, dtype=None, device=None) -> Tensor

Converts :attr:`data` into a tensor, sharing data and preserving autograd
history if possible.

If :attr:`data` is already a tensor with the requested dtype and device
then :attr:`data` itself is returned, but if :attr:`data` is a
tensor with a different dtype or device then it's copied as if using
`data.to(dtype=dtype, device=device)`.

If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
tensor is constructed using :func:`torch.from_numpy`.

.. seealso::

    :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).

Args:
    {data}
    {dtype}
    device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
        then the device of data is used. If None and data is not a tensor then
        the result tensor is constructed on the current device.

Example::

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.as_tensor(a)
    >>> t
    tensor([ 1,  2,  3])
    >>> t[0] = -1
    >>> a
    array([-1,  2,  3])

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.as_tensor(a, device=torch.device('cuda'))
    >>> t
    tensor([ 1,  2,  3])
    >>> t[0] = -1
    >>> a
    array([1, 2, 3])
""".format(
        **factory_data_common_args
    ),
)
add_docstr(
    torch.asin,
    r"""
asin(input, *, out=None) -> Tensor

Returns a new tensor with the arcsine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sin^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.5962,  1.4985, -0.4396,  1.4525])
    >>> torch.asin(a)
    tensor([-0.6387,     nan, -0.4552,     nan])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arcsin,
    r"""
arcsin(input, *, out=None) -> Tensor

Alias for :func:`torch.asin`.
""",
)

add_docstr(
    torch.asinh,
    r"""
asinh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \sinh^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
    >>> torch.asinh(a)
    tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arcsinh,
    r"""
arcsinh(input, *, out=None) -> Tensor

Alias for :func:`torch.asinh`.
""",
)

add_docstr(
    torch.atan,
    r"""
atan(input, *, out=None) -> Tensor

Returns a new tensor with the arctangent of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \tan^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.2341,  0.2539, -0.6256, -0.6448])
    >>> torch.atan(a)
    tensor([ 0.2299,  0.2487, -0.5591, -0.5727])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arctan,
    r"""
arctan(input, *, out=None) -> Tensor

Alias for :func:`torch.atan`.
""",
)
add_docstr(
    torch.atan2,
    r"""
atan2(input, other, *, out=None) -> Tensor

Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
parameter, is the y-coordinate.)

The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    input (Tensor): the first input tensor
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.9041,  0.0196, -0.3108, -2.4423])
    >>> torch.atan2(a, torch.randn(4))
    tensor([ 0.9833,  0.0811, -1.9743, -1.4151])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arctan2,
    r"""
arctan2(input, other, *, out=None) -> Tensor

Alias for :func:`torch.atan2`.
""",
)

add_docstr(
    torch.atanh,
    r"""
atanh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.

Note:
    The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
    will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
    mapped to `+/-INF` respectively.

.. math::
    \text{out}_{i} = \tanh^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(-1, 1)
    >>> a
    tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
    >>> torch.atanh(a)
    tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.arctanh,
    r"""
arctanh(input, *, out=None) -> Tensor

Alias for :func:`torch.atanh`.
""",
)
add_docstr(
    torch.asarray,
    r"""
asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor

Converts :attr:`obj` to a tensor.

:attr:`obj` can be one of:

1. a tensor
2. a NumPy array or a NumPy scalar
3. a DLPack capsule
4. an object that implements Python's buffer protocol
5. a scalar
6. a sequence of scalars

When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
same device, and share memory with it. These properties can be controlled with the
:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
If the returned tensor is of a different datatype, on a different device, or a copy is
requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
also a tensor with an autograd history then the returned tensor will have the same history.

When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
buffer protocol then the buffer is interpreted as an array of bytes grouped according to
the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
passed then the default floating point datatype is used instead.) The returned tensor
will have the specified datatype (or default floating point datatype if none is specified)
and, by default, be on the CPU device and share memory with the buffer.

When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
the CPU that doesn't share its memory (i.e. ``copy=True``). By default the datatype will
be the PyTorch datatype corresponding to the NumPy scalar's datatype.

When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
returned tensor will, by default, infer its datatype from the scalar values, be on the
current default device, and not share its memory.

.. seealso::

    :func:`torch.tensor` creates a tensor that always copies the data from the input object.
    :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
    :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
    implement the buffer protocol.
    :func:`torch.from_dlpack` creates a tensor that always shares memory from
    DLPack capsules.

Args:
    obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
        buffer protocol, scalar, or sequence of scalars.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
        Default: ``None``, which causes the datatype of the returned tensor to be
        inferred from :attr:`obj`.
    copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
        Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
        whenever possible. If ``True`` then the returned tensor does not share its memory.
        If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
        error is thrown if it cannot.
    device (:class:`torch.device`, optional): the device of the returned tensor.
        Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
        :attr:`obj` is a Python sequence, the current default device will be used.
    requires_grad (bool, optional): whether the returned tensor requires grad.
        Default: ``False``, which causes the returned tensor not to require a gradient.
        If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
        is also a tensor with an autograd history then the returned tensor will have
        the same history.

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> # Shares memory with tensor 'a'
    >>> b = torch.asarray(a)
    >>> a.data_ptr() == b.data_ptr()
    True
    >>> # Forces memory copy
    >>> c = torch.asarray(a, copy=True)
    >>> a.data_ptr() == c.data_ptr()
    False

    >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
    >>> b = a + 2
    >>> b
    tensor([3., 4., 5.], grad_fn=<AddBackward0>)
    >>> # Shares memory with tensor 'b', with no grad
    >>> c = torch.asarray(b)
    >>> c
    tensor([3., 4., 5.])
    >>> # Shares memory with tensor 'b', retaining autograd history
    >>> d = torch.asarray(b, requires_grad=True)
    >>> d
    tensor([3., 4., 5.], grad_fn=<AddBackward0>)

    >>> array = numpy.array([1, 2, 3])
    >>> # Shares memory with array 'array'
    >>> t1 = torch.asarray(array)
    >>> array.__array_interface__['data'][0] == t1.data_ptr()
    True
    >>> # Copies memory due to dtype mismatch
    >>> t2 = torch.asarray(array, dtype=torch.float32)
    >>> array.__array_interface__['data'][0] == t2.data_ptr()
    False

    >>> scalar = numpy.float64(0.5)
    >>> torch.asarray(scalar)
    tensor(0.5000, dtype=torch.float64)
""",
)
add_docstr(
    torch.baddbmm,
    r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.

:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.

If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.

.. math::
    \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)

If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
    + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): the tensor to be added
    batch1 (Tensor): the first batch of matrices to be multiplied
    batch2 (Tensor): the second batch of matrices to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(10, 3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.baddbmm(M, batch1, batch2).size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
add_docstr(
    torch.bernoulli,
    r"""
bernoulli(input, *, generator=None, out=None) -> Tensor

Draws binary random numbers (0 or 1) from a Bernoulli distribution.

The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.

The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.

.. math::
    \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
"""
    + r"""
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.

:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.

Args:
    input (Tensor): the input tensor of probability values for the Bernoulli distribution

Keyword args:
    {generator}
    {out}

Example::

    >>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
    >>> a
    tensor([[ 0.1737,  0.0950,  0.3609],
            [ 0.7148,  0.0289,  0.2676],
            [ 0.9456,  0.8937,  0.7202]])
    >>> torch.bernoulli(a)
    tensor([[ 1.,  0.,  0.],
            [ 0.,  0.,  0.],
            [ 1.,  1.,  1.]])

    >>> a = torch.ones(3, 3)  # probability of drawing "1" is 1
    >>> torch.bernoulli(a)
    tensor([[ 1.,  1.,  1.],
            [ 1.,  1.,  1.],
            [ 1.,  1.,  1.]])
    >>> a = torch.zeros(3, 3)  # probability of drawing "1" is 0
    >>> torch.bernoulli(a)
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.],
            [ 0.,  0.,  0.]])
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bincount,
    r"""
bincount(input, weights=None, minlength=0) -> Tensor

Count the frequency of each value in an array of non-negative ints.

The number of bins (size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is a tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.

Note:
    {backward_reproducibility_note}

Arguments:
    input (Tensor): 1-d int tensor
    weights (Tensor): optional, weight for each value in the input tensor.
        Should be of same size as input tensor.
    minlength (int): optional, minimum number of bins. Should be non-negative.

Returns:
    output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
    :attr:`input` is non-empty, else ``Size(0)``

Example::

    >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
    >>> weights = torch.linspace(0, 1, steps=5)
    >>> input, weights
    (tensor([4, 3, 6, 3, 4]),
     tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
    >>> torch.bincount(input)
    tensor([0, 0, 0, 2, 2, 0, 1])
    >>> input.bincount(weights)
    tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""".format(
        **reproducibility_notes
    ),
)
add_docstr(
    torch.bitwise_not,
    r"""
bitwise_not(input, *, out=None) -> Tensor

Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
    tensor([ 0,  1, -4], dtype=torch.int8)
""".format(
        **common_args
    ),
)
add_docstr(
    torch.bmm,
    r"""
bmm(input, mat2, *, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.

:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.

If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.

.. math::
    \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
"""
    + r"""
{tf32_note}

{rocm_fp16_note}

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
          For broadcasting matrix products, see :func:`torch.matmul`.

Args:
    input (Tensor): the first batch of matrices to be multiplied
    mat2 (Tensor): the second batch of matrices to be multiplied

Keyword Args:
    {out}

Example::

    >>> input = torch.randn(10, 3, 4)
    >>> mat2 = torch.randn(10, 4, 5)
    >>> res = torch.bmm(input, mat2)
    >>> res.size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes
    ),
)
add_docstr(
    torch.bitwise_and,
    r"""
bitwise_and(input, other, *, out=None) -> Tensor

Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([1, 0, 3], dtype=torch.int8)
    >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ False, True, False])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.bitwise_or,
    r"""
bitwise_or(input, other, *, out=None) -> Tensor

Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-1, -2,  3], dtype=torch.int8)
    >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ True, True, False])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.bitwise_xor,
    r"""
bitwise_xor(input, other, *, out=None) -> Tensor

Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-2, -2,  0], dtype=torch.int8)
    >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ True, False, False])
""".format(
        **common_args
    ),
)
  1328. add_docstr(
  1329. torch.bitwise_left_shift,
  1330. r"""
  1331. bitwise_left_shift(input, other, *, out=None) -> Tensor
  1332. Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
  1333. The input tensor must be of integral type. This operator supports
  1334. :ref:`broadcasting to a common shape <broadcasting-semantics>` and
  1335. :ref:`type promotion <type-promotion-doc>`.
  1336. The operation applied is:
  1337. .. math::
  1338. \text{{out}}_i = \text{{input}}_i << \text{{other}}_i
  1339. Args:
  1340. input (Tensor or Scalar): the first input tensor
  1341. other (Tensor or Scalar): the second input tensor
  1342. Keyword args:
  1343. {out}
  1344. Example::
  1345. >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
  1346. tensor([-2, -2, 24], dtype=torch.int8)
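Since the operator broadcasts and promotes types (as described above), the shift
amount may also be a plain Python scalar; an illustrative example::
>>> torch.bitwise_left_shift(torch.tensor([1, 2, 3]), 1)
tensor([2, 4, 6])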
  1347. """.format(
  1348. **common_args
  1349. ),
  1350. )
  1351. add_docstr(
  1352. torch.bitwise_right_shift,
  1353. r"""
  1354. bitwise_right_shift(input, other, *, out=None) -> Tensor
  1355. Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
  1356. The input tensor must be of integral type. This operator supports
  1357. :ref:`broadcasting to a common shape <broadcasting-semantics>` and
  1358. :ref:`type promotion <type-promotion-doc>`.
  1359. In any case, if the value of the right operand is negative or is greater
than or equal to the number of bits in the promoted left operand, the behavior is undefined.
  1361. The operation applied is:
  1362. .. math::
  1363. \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i
  1364. Args:
  1365. input (Tensor or Scalar): the first input tensor
  1366. other (Tensor or Scalar): the second input tensor
  1367. Keyword args:
  1368. {out}
  1369. Example::
  1370. >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
  1371. tensor([-1, -7, 3], dtype=torch.int8)
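Likewise, broadcasting allows a scalar shift amount; an illustrative example::
>>> torch.bitwise_right_shift(torch.tensor([2, 4, 8]), 1)
tensor([1, 2, 4])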
  1372. """.format(
  1373. **common_args
  1374. ),
  1375. )
  1376. add_docstr(
  1377. torch.broadcast_to,
  1378. r"""
  1379. broadcast_to(input, shape) -> Tensor
Broadcasts :attr:`input` to the shape :attr:`shape`.
  1381. Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
  1382. Args:
  1383. {input}
  1384. shape (list, tuple, or :class:`torch.Size`): the new shape.
  1385. Example::
  1386. >>> x = torch.tensor([1, 2, 3])
  1387. >>> torch.broadcast_to(x, (3, 3))
  1388. tensor([[1, 2, 3],
  1389. [1, 2, 3],
  1390. [1, 2, 3]])
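As a small check of the equivalence to :meth:`~Tensor.expand` noted above::
>>> torch.broadcast_to(x, (3, 3)).equal(x.expand(3, 3))
True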
  1391. """.format(
  1392. **common_args
  1393. ),
  1394. )
  1395. add_docstr(
  1396. torch.stack,
  1397. r"""
  1398. stack(tensors, dim=0, *, out=None) -> Tensor
  1399. Concatenates a sequence of tensors along a new dimension.
  1400. All tensors need to be of the same size.
  1401. .. seealso::
  1402. :func:`torch.cat` concatenates the given sequence along an existing dimension.
  1403. Arguments:
  1404. tensors (sequence of Tensors): sequence of tensors to concatenate
  1405. dim (int, optional): dimension to insert. Has to be between 0 and the number
  1406. of dimensions of concatenated tensors (inclusive). Default: 0
  1407. Keyword args:
  1408. {out}
  1409. Example::
  1410. >>> x = torch.randn(2, 3)
  1411. >>> x
  1412. tensor([[ 0.3367, 0.1288, 0.2345],
  1413. [ 0.2303, -1.1229, -0.1863]])
>>> torch.stack((x, x)) # same as torch.stack((x, x), dim=0)
tensor([[[ 0.3367, 0.1288, 0.2345],
[ 0.2303, -1.1229, -0.1863]],
[[ 0.3367, 0.1288, 0.2345],
[ 0.2303, -1.1229, -0.1863]]])
>>> torch.stack((x, x)).size()
torch.Size([2, 2, 3])
>>> torch.stack((x, x), dim=1)
tensor([[[ 0.3367, 0.1288, 0.2345],
[ 0.3367, 0.1288, 0.2345]],
[[ 0.2303, -1.1229, -0.1863],
[ 0.2303, -1.1229, -0.1863]]])
>>> torch.stack((x, x), dim=2)
tensor([[[ 0.3367, 0.3367],
[ 0.1288, 0.1288],
[ 0.2345, 0.2345]],
[[ 0.2303, 0.2303],
[-1.1229, -1.1229],
[-0.1863, -0.1863]]])
>>> torch.stack((x, x), dim=-1)
tensor([[[ 0.3367, 0.3367],
[ 0.1288, 0.1288],
[ 0.2345, 0.2345]],
[[ 0.2303, 0.2303],
[-1.1229, -1.1229],
[-0.1863, -0.1863]]])
  1441. """.format(
  1442. **common_args
  1443. ),
  1444. )
  1445. add_docstr(
  1446. torch.hstack,
  1447. r"""
  1448. hstack(tensors, *, out=None) -> Tensor
  1449. Stack tensors in sequence horizontally (column wise).
  1450. This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
  1451. Args:
  1452. tensors (sequence of Tensors): sequence of tensors to concatenate
  1453. Keyword args:
  1454. {out}
  1455. Example::
  1456. >>> a = torch.tensor([1, 2, 3])
  1457. >>> b = torch.tensor([4, 5, 6])
  1458. >>> torch.hstack((a,b))
  1459. tensor([1, 2, 3, 4, 5, 6])
  1460. >>> a = torch.tensor([[1],[2],[3]])
  1461. >>> b = torch.tensor([[4],[5],[6]])
  1462. >>> torch.hstack((a,b))
  1463. tensor([[1, 4],
  1464. [2, 5],
  1465. [3, 6]])
  1466. """.format(
  1467. **common_args
  1468. ),
  1469. )
  1470. add_docstr(
  1471. torch.vstack,
  1472. r"""
  1473. vstack(tensors, *, out=None) -> Tensor
  1474. Stack tensors in sequence vertically (row wise).
  1475. This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
  1476. Args:
  1477. tensors (sequence of Tensors): sequence of tensors to concatenate
  1478. Keyword args:
  1479. {out}
  1480. Example::
  1481. >>> a = torch.tensor([1, 2, 3])
  1482. >>> b = torch.tensor([4, 5, 6])
  1483. >>> torch.vstack((a,b))
  1484. tensor([[1, 2, 3],
  1485. [4, 5, 6]])
  1486. >>> a = torch.tensor([[1],[2],[3]])
  1487. >>> b = torch.tensor([[4],[5],[6]])
  1488. >>> torch.vstack((a,b))
  1489. tensor([[1],
  1490. [2],
  1491. [3],
  1492. [4],
  1493. [5],
  1494. [6]])
  1495. """.format(
  1496. **common_args
  1497. ),
  1498. )
  1499. add_docstr(
  1500. torch.dstack,
  1501. r"""
  1502. dstack(tensors, *, out=None) -> Tensor
  1503. Stack tensors in sequence depthwise (along third axis).
  1504. This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
  1505. Args:
  1506. tensors (sequence of Tensors): sequence of tensors to concatenate
  1507. Keyword args:
  1508. {out}
  1509. Example::
  1510. >>> a = torch.tensor([1, 2, 3])
  1511. >>> b = torch.tensor([4, 5, 6])
  1512. >>> torch.dstack((a,b))
  1513. tensor([[[1, 4],
  1514. [2, 5],
  1515. [3, 6]]])
  1516. >>> a = torch.tensor([[1],[2],[3]])
  1517. >>> b = torch.tensor([[4],[5],[6]])
  1518. >>> torch.dstack((a,b))
  1519. tensor([[[1, 4]],
  1520. [[2, 5]],
  1521. [[3, 6]]])
  1522. """.format(
  1523. **common_args
  1524. ),
  1525. )
  1526. add_docstr(
  1527. torch.tensor_split,
  1528. r"""
  1529. tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
  1530. Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
  1531. along dimension :attr:`dim` according to the indices or number of sections specified
  1532. by :attr:`indices_or_sections`. This function is based on NumPy's
  1533. :func:`numpy.array_split`.
  1534. Args:
  1535. input (Tensor): the tensor to split
  1536. indices_or_sections (Tensor, int or list or tuple of ints):
If :attr:`indices_or_sections` is an integer ``n`` or a zero-dimensional long tensor
  1538. with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
  1539. If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
  1540. section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
  1541. is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
  1542. sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
  1543. have size :code:`int(input.size(dim) / n)`.
  1544. If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
  1545. tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
  1546. in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
  1547. would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
  1548. If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
  1549. long tensor on the CPU.
  1550. dim (int, optional): dimension along which to split the tensor. Default: ``0``
  1551. Example::
  1552. >>> x = torch.arange(8)
  1553. >>> torch.tensor_split(x, 3)
  1554. (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
  1555. >>> x = torch.arange(7)
  1556. >>> torch.tensor_split(x, 3)
  1557. (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
  1558. >>> torch.tensor_split(x, (1, 6))
  1559. (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
  1560. >>> x = torch.arange(14).reshape(2, 7)
  1561. >>> x
  1562. tensor([[ 0, 1, 2, 3, 4, 5, 6],
  1563. [ 7, 8, 9, 10, 11, 12, 13]])
  1564. >>> torch.tensor_split(x, 3, dim=1)
  1565. (tensor([[0, 1, 2],
  1566. [7, 8, 9]]),
  1567. tensor([[ 3, 4],
  1568. [10, 11]]),
  1569. tensor([[ 5, 6],
  1570. [12, 13]]))
  1571. >>> torch.tensor_split(x, (1, 6), dim=1)
  1572. (tensor([[0],
  1573. [7]]),
  1574. tensor([[ 1, 2, 3, 4, 5],
  1575. [ 8, 9, 10, 11, 12]]),
  1576. tensor([[ 6],
  1577. [13]]))
  1578. """,
  1579. )
  1580. add_docstr(
  1581. torch.chunk,
  1582. r"""
  1583. chunk(input, chunks, dim=0) -> List of Tensors
  1584. Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
  1585. the input tensor.
  1586. .. note::
  1587. This function may return fewer than the specified number of chunks!
  1588. .. seealso::
:func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks.
  1590. If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
  1591. all returned chunks will be the same size.
  1592. If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
  1593. all returned chunks will be the same size, except the last one.
  1594. If such division is not possible, this function may return fewer
  1595. than the specified number of chunks.
  1596. Arguments:
  1597. input (Tensor): the tensor to split
  1598. chunks (int): number of chunks to return
  1599. dim (int): dimension along which to split the tensor
Example::
  1601. >>> torch.arange(11).chunk(6)
  1602. (tensor([0, 1]),
  1603. tensor([2, 3]),
  1604. tensor([4, 5]),
  1605. tensor([6, 7]),
  1606. tensor([8, 9]),
  1607. tensor([10]))
  1608. >>> torch.arange(12).chunk(6)
  1609. (tensor([0, 1]),
  1610. tensor([2, 3]),
  1611. tensor([4, 5]),
  1612. tensor([6, 7]),
  1613. tensor([8, 9]),
  1614. tensor([10, 11]))
  1615. >>> torch.arange(13).chunk(6)
  1616. (tensor([0, 1, 2]),
  1617. tensor([3, 4, 5]),
  1618. tensor([6, 7, 8]),
  1619. tensor([ 9, 10, 11]),
  1620. tensor([12]))
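By contrast, :func:`torch.tensor_split` (see above) always returns exactly the
requested number of chunks::
>>> torch.tensor_split(torch.arange(13), 6)
(tensor([0, 1, 2]),
tensor([3, 4]),
tensor([5, 6]),
tensor([7, 8]),
tensor([ 9, 10]),
tensor([11, 12]))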
  1621. """,
  1622. )
  1623. add_docstr(
  1624. torch.unsafe_chunk,
  1625. r"""
  1626. unsafe_chunk(input, chunks, dim=0) -> List of Tensors
  1627. Works like :func:`torch.chunk` but without enforcing the autograd restrictions
  1628. on inplace modification of the outputs.
  1629. .. warning::
This function is safe to use as long as only the input, or only the outputs,
are modified in place after calling this function. It is the user's
responsibility to ensure that this is the case. If both the input and one or
more of the outputs are modified in place, the gradients computed by autograd
will be silently incorrect.
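Example (a minimal sketch; apart from the autograd caveat above, the result
matches :func:`torch.chunk`)::
>>> torch.unsafe_chunk(torch.arange(6), 3)
(tensor([0, 1]), tensor([2, 3]), tensor([4, 5]))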
  1635. """,
  1636. )
  1637. add_docstr(
  1638. torch.unsafe_split,
  1639. r"""
  1640. unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
  1641. Works like :func:`torch.split` but without enforcing the autograd restrictions
  1642. on inplace modification of the outputs.
  1643. .. warning::
This function is safe to use as long as only the input, or only the outputs,
are modified in place after calling this function. It is the user's
responsibility to ensure that this is the case. If both the input and one or
more of the outputs are modified in place, the gradients computed by autograd
will be silently incorrect.
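Example (a minimal sketch; apart from the autograd caveat above, the result
matches :func:`torch.split`)::
>>> torch.unsafe_split(torch.arange(6), 2)
(tensor([0, 1]), tensor([2, 3]), tensor([4, 5]))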
  1649. """,
  1650. )
  1651. add_docstr(
  1652. torch.hsplit,
  1653. r"""
  1654. hsplit(input, indices_or_sections) -> List of Tensors
  1655. Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
  1656. horizontally according to :attr:`indices_or_sections`. Each split is a view of
  1657. :attr:`input`.
If :attr:`input` is one dimensional, this is equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=0)`` (the split dimension is
0), and if :attr:`input` has two or more dimensions, it is equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=1)`` (the split dimension is
1), except that if :attr:`indices_or_sections` is an integer it must evenly
divide the split dimension or a runtime error will be thrown.
  1664. This function is based on NumPy's :func:`numpy.hsplit`.
  1665. Args:
  1666. input (Tensor): tensor to split.
  1667. indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
  1668. Example::
  1669. >>> t = torch.arange(16.0).reshape(4,4)
  1670. >>> t
  1671. tensor([[ 0., 1., 2., 3.],
  1672. [ 4., 5., 6., 7.],
  1673. [ 8., 9., 10., 11.],
  1674. [12., 13., 14., 15.]])
  1675. >>> torch.hsplit(t, 2)
  1676. (tensor([[ 0., 1.],
  1677. [ 4., 5.],
  1678. [ 8., 9.],
  1679. [12., 13.]]),
  1680. tensor([[ 2., 3.],
  1681. [ 6., 7.],
  1682. [10., 11.],
  1683. [14., 15.]]))
  1684. >>> torch.hsplit(t, [3, 6])
  1685. (tensor([[ 0., 1., 2.],
  1686. [ 4., 5., 6.],
  1687. [ 8., 9., 10.],
  1688. [12., 13., 14.]]),
  1689. tensor([[ 3.],
  1690. [ 7.],
  1691. [11.],
  1692. [15.]]),
  1693. tensor([], size=(4, 0)))
  1694. """,
  1695. )
  1696. add_docstr(
  1697. torch.vsplit,
  1698. r"""
  1699. vsplit(input, indices_or_sections) -> List of Tensors
  1700. Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
  1701. vertically according to :attr:`indices_or_sections`. Each split is a view of
  1702. :attr:`input`.
This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=0)``
  1704. (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
  1705. it must evenly divide the split dimension or a runtime error will be thrown.
  1706. This function is based on NumPy's :func:`numpy.vsplit`.
  1707. Args:
  1708. input (Tensor): tensor to split.
  1709. indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
  1710. Example::
  1711. >>> t = torch.arange(16.0).reshape(4,4)
  1712. >>> t
  1713. tensor([[ 0., 1., 2., 3.],
  1714. [ 4., 5., 6., 7.],
  1715. [ 8., 9., 10., 11.],
  1716. [12., 13., 14., 15.]])
  1717. >>> torch.vsplit(t, 2)
  1718. (tensor([[0., 1., 2., 3.],
  1719. [4., 5., 6., 7.]]),
  1720. tensor([[ 8., 9., 10., 11.],
  1721. [12., 13., 14., 15.]]))
  1722. >>> torch.vsplit(t, [3, 6])
  1723. (tensor([[ 0., 1., 2., 3.],
  1724. [ 4., 5., 6., 7.],
  1725. [ 8., 9., 10., 11.]]),
  1726. tensor([[12., 13., 14., 15.]]),
  1727. tensor([], size=(0, 4)))
  1728. """,
  1729. )
  1730. add_docstr(
  1731. torch.dsplit,
  1732. r"""
  1733. dsplit(input, indices_or_sections) -> List of Tensors
  1734. Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
  1735. depthwise according to :attr:`indices_or_sections`. Each split is a view of
  1736. :attr:`input`.
This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=2)``
  1738. (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
  1739. it must evenly divide the split dimension or a runtime error will be thrown.
  1740. This function is based on NumPy's :func:`numpy.dsplit`.
  1741. Args:
  1742. input (Tensor): tensor to split.
  1743. indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
  1744. Example::
  1745. >>> t = torch.arange(16.0).reshape(2, 2, 4)
  1746. >>> t
  1747. tensor([[[ 0., 1., 2., 3.],
  1748. [ 4., 5., 6., 7.]],
  1749. [[ 8., 9., 10., 11.],
  1750. [12., 13., 14., 15.]]])
  1751. >>> torch.dsplit(t, 2)
  1752. (tensor([[[ 0., 1.],
  1753. [ 4., 5.]],
  1754. [[ 8., 9.],
  1755. [12., 13.]]]),
  1756. tensor([[[ 2., 3.],
  1757. [ 6., 7.]],
  1758. [[10., 11.],
  1759. [14., 15.]]]))
  1760. >>> torch.dsplit(t, [3, 6])
  1761. (tensor([[[ 0., 1., 2.],
  1762. [ 4., 5., 6.]],
  1763. [[ 8., 9., 10.],
  1764. [12., 13., 14.]]]),
  1765. tensor([[[ 3.],
  1766. [ 7.]],
  1767. [[11.],
  1768. [15.]]]),
  1769. tensor([], size=(2, 2, 0)))
  1770. """,
  1771. )
  1772. add_docstr(
  1773. torch.can_cast,
  1774. r"""
  1775. can_cast(from_, to) -> bool
  1776. Determines if a type conversion is allowed under PyTorch casting rules
  1777. described in the type promotion :ref:`documentation <type-promotion-doc>`.
  1778. Args:
  1779. from\_ (dtype): The original :class:`torch.dtype`.
  1780. to (dtype): The target :class:`torch.dtype`.
  1781. Example::
  1782. >>> torch.can_cast(torch.double, torch.float)
  1783. True
  1784. >>> torch.can_cast(torch.float, torch.int)
  1785. False
  1786. """,
  1787. )
  1788. add_docstr(
  1789. torch.corrcoef,
  1790. r"""
  1791. corrcoef(input) -> Tensor
  1792. Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
  1793. where rows are the variables and columns are the observations.
  1794. .. note::
  1795. The correlation coefficient matrix R is computed using the covariance matrix C as given by
  1796. :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
  1797. .. note::
  1798. Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
  1799. The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
  1800. Args:
  1801. input (Tensor): A 2D matrix containing multiple variables and observations, or a
  1802. Scalar or 1D vector representing a single variable.
  1803. Returns:
  1804. (Tensor) The correlation coefficient matrix of the variables.
  1805. .. seealso::
  1806. :func:`torch.cov` covariance matrix.
  1807. Example::
  1808. >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
  1809. >>> torch.corrcoef(x)
  1810. tensor([[ 1., -1.],
  1811. [-1., 1.]])
  1812. >>> x = torch.randn(2, 4)
  1813. >>> x
  1814. tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
  1815. [-0.5812, 0.1535, 0.2387, 0.2350]])
  1816. >>> torch.corrcoef(x)
  1817. tensor([[1.0000, 0.3582],
  1818. [0.3582, 1.0000]])
  1819. >>> torch.corrcoef(x[0])
  1820. tensor(1.)
  1821. """,
  1822. )
  1823. add_docstr(
  1824. torch.cov,
  1825. r"""
  1826. cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
  1827. Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
  1828. the variables and columns are the observations.
  1829. A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
  1830. the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
  1831. a single variable (Scalar or 1D) then its variance is returned.
  1832. The sample covariance of the variables :math:`x` and :math:`y` is given by:
  1833. .. math::
  1834. \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}
  1835. where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and
  1836. :math:`\delta N` is the :attr:`correction`.
  1837. If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
  1838. is calculated, which is given by:
  1839. .. math::
  1840. \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
  1841. {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}
  1842. where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
  1843. provided, or :math:`w = f \times a` if both are provided, and
  1844. :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
  1845. provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.
  1846. Args:
  1847. input (Tensor): A 2D matrix containing multiple variables and observations, or a
  1848. Scalar or 1D vector representing a single variable.
  1849. Keyword Args:
  1850. correction (int, optional): difference between the sample size and sample degrees of freedom.
Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate,
even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
returns the simple average.
  1854. fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
  1855. times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
  1856. Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
  1857. aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
  1858. These relative weights are typically large for observations considered "important" and smaller for
  1859. observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
  1860. Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
  1861. Returns:
  1862. (Tensor) The covariance matrix of the variables.
  1863. .. seealso::
  1864. :func:`torch.corrcoef` normalized covariance matrix.
  1865. Example::
  1866. >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
  1867. >>> x
  1868. tensor([[0, 1, 2],
  1869. [2, 1, 0]])
  1870. >>> torch.cov(x)
  1871. tensor([[ 1., -1.],
  1872. [-1., 1.]])
  1873. >>> torch.cov(x, correction=0)
  1874. tensor([[ 0.6667, -0.6667],
  1875. [-0.6667, 0.6667]])
  1876. >>> fw = torch.randint(1, 10, (3,))
  1877. >>> fw
  1878. tensor([1, 6, 9])
  1879. >>> aw = torch.rand(3)
  1880. >>> aw
  1881. tensor([0.4282, 0.0255, 0.4144])
  1882. >>> torch.cov(x, fweights=fw, aweights=aw)
  1883. tensor([[ 0.4169, -0.4169],
  1884. [-0.4169, 0.4169]])
  1885. """,
  1886. )
  1887. add_docstr(
  1888. torch.cat,
  1889. r"""
  1890. cat(tensors, dim=0, *, out=None) -> Tensor
Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be 1-D empty tensors with size ``(0,)``.
  1894. :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
  1895. and :func:`torch.chunk`.
  1896. :func:`torch.cat` can be best understood via examples.
  1897. .. seealso::
  1898. :func:`torch.stack` concatenates the given sequence along a new dimension.
  1899. Args:
  1900. tensors (sequence of Tensors): any python sequence of tensors of the same type.
  1901. Non-empty tensors provided must have the same shape, except in the
  1902. cat dimension.
  1903. dim (int, optional): the dimension over which the tensors are concatenated
  1904. Keyword args:
  1905. {out}
  1906. Example::
  1907. >>> x = torch.randn(2, 3)
  1908. >>> x
  1909. tensor([[ 0.6580, -1.0969, -0.4614],
  1910. [-0.1034, -0.5790, 0.1497]])
  1911. >>> torch.cat((x, x, x), 0)
  1912. tensor([[ 0.6580, -1.0969, -0.4614],
  1913. [-0.1034, -0.5790, 0.1497],
  1914. [ 0.6580, -1.0969, -0.4614],
  1915. [-0.1034, -0.5790, 0.1497],
  1916. [ 0.6580, -1.0969, -0.4614],
  1917. [-0.1034, -0.5790, 0.1497]])
  1918. >>> torch.cat((x, x, x), 1)
  1919. tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
  1920. -1.0969, -0.4614],
  1921. [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
  1922. -0.5790, 0.1497]])
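As a small check of the inverse relationship with :func:`torch.split` mentioned
above::
>>> torch.cat(torch.split(x, 1), 0).equal(x)
True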
  1923. """.format(
  1924. **common_args
  1925. ),
  1926. )
  1927. add_docstr(
  1928. torch.concat,
  1929. r"""
  1930. concat(tensors, dim=0, *, out=None) -> Tensor
  1931. Alias of :func:`torch.cat`.
  1932. """,
  1933. )
  1934. add_docstr(
  1935. torch.concatenate,
  1936. r"""
  1937. concatenate(tensors, axis=0, out=None) -> Tensor
  1938. Alias of :func:`torch.cat`.
  1939. """,
  1940. )
  1941. add_docstr(
  1942. torch.ceil,
  1943. r"""
  1944. ceil(input, *, out=None) -> Tensor
  1945. Returns a new tensor with the ceil of the elements of :attr:`input`,
  1946. the smallest integer greater than or equal to each element.
  1947. For integer inputs, follows the array-api convention of returning a
  1948. copy of the input tensor.
  1949. .. math::
  1950. \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
  1951. """
  1952. + r"""
  1953. Args:
  1954. {input}
  1955. Keyword args:
  1956. {out}
  1957. Example::
  1958. >>> a = torch.randn(4)
  1959. >>> a
  1960. tensor([-0.6341, -1.4208, -1.0900, 0.5826])
  1961. >>> torch.ceil(a)
  1962. tensor([-0., -1., -1., 1.])
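And, as noted above, an integral input is returned as an unchanged copy::
>>> torch.ceil(torch.tensor([1, 2, 3]))
tensor([1, 2, 3])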
  1963. """.format(
  1964. **common_args
  1965. ),
  1966. )
  1967. add_docstr(
  1968. torch.real,
  1969. r"""
  1970. real(input) -> Tensor
  1971. Returns a new tensor containing real values of the :attr:`self` tensor.
  1972. The returned tensor and :attr:`self` share the same underlying storage.
  1973. Args:
  1974. {input}
  1975. Example::
  1976. >>> x=torch.randn(4, dtype=torch.cfloat)
  1977. >>> x
  1978. tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
  1979. >>> x.real
  1980. tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
  1981. """.format(
  1982. **common_args
  1983. ),
  1984. )
  1985. add_docstr(
  1986. torch.imag,
  1987. r"""
  1988. imag(input) -> Tensor
  1989. Returns a new tensor containing imaginary values of the :attr:`self` tensor.
  1990. The returned tensor and :attr:`self` share the same underlying storage.
  1991. .. warning::
  1992. :func:`imag` is only supported for tensors with complex dtypes.
  1993. Args:
  1994. {input}
  1995. Example::
  1996. >>> x=torch.randn(4, dtype=torch.cfloat)
  1997. >>> x
  1998. tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
  1999. >>> x.imag
  2000. tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
  2001. """.format(
  2002. **common_args
  2003. ),
  2004. )
  2005. add_docstr(
  2006. torch.view_as_real,
  2007. r"""
  2008. view_as_real(input) -> Tensor
  2009. Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
  2010. :attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
  2011. real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
  2012. represents the real and imaginary components of complex numbers.
  2013. .. warning::
  2014. :func:`view_as_real` is only supported for tensors with ``complex dtypes``.
  2015. Args:
  2016. {input}
  2017. Example::
  2018. >>> x=torch.randn(4, dtype=torch.cfloat)
  2019. >>> x
  2020. tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
  2021. >>> torch.view_as_real(x)
  2022. tensor([[ 0.4737, -0.3839],
  2023. [-0.2098, -0.6699],
  2024. [ 0.3470, -0.9451],
  2025. [-0.5174, -1.3136]])
  2026. """.format(
  2027. **common_args
  2028. ),
  2029. )
  2030. add_docstr(
  2031. torch.view_as_complex,
  2032. r"""
  2033. view_as_complex(input) -> Tensor
  2034. Returns a view of :attr:`input` as a complex tensor. For an input complex
  2035. tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
  2036. new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
  2037. dimension of the input tensor is expected to represent the real and imaginary
  2038. components of complex numbers.
  2039. .. warning::
  2040. :func:`view_as_complex` is only supported for tensors with
  2041. :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
  2042. expected to have the last dimension of :attr:`size` 2. In addition, the
  2043. tensor must have a `stride` of 1 for its last dimension. The strides of all
  2044. other dimensions must be even numbers.
  2045. Args:
  2046. {input}
  2047. Example::
  2048. >>> x=torch.randn(4, 2)
  2049. >>> x
  2050. tensor([[ 1.6116, -0.5772],
  2051. [-1.4606, -0.9120],
  2052. [ 0.0786, -1.7497],
  2053. [-0.6561, -1.6623]])
  2054. >>> torch.view_as_complex(x)
  2055. tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
  2056. """.format(
  2057. **common_args
  2058. ),
  2059. )
  2060. add_docstr(
  2061. torch.reciprocal,
  2062. r"""
  2063. reciprocal(input, *, out=None) -> Tensor
  2064. Returns a new tensor with the reciprocal of the elements of :attr:`input`
  2065. .. math::
  2066. \text{out}_{i} = \frac{1}{\text{input}_{i}}
  2067. .. note::
  2068. Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
  2069. inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
  2070. the default scalar type.
  2071. """
  2072. + r"""
  2073. Args:
  2074. {input}
  2075. Keyword args:
  2076. {out}
  2077. Example::
  2078. >>> a = torch.randn(4)
  2079. >>> a
  2080. tensor([-0.4595, -2.1219, -1.4314, 0.7298])
  2081. >>> torch.reciprocal(a)
  2082. tensor([-2.1763, -0.4713, -0.6986, 1.3702])
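As described in the note above, integral inputs are promoted to the default
scalar type::
>>> torch.reciprocal(torch.tensor([1, 2, 4]))
tensor([1.0000, 0.5000, 0.2500])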
  2083. """.format(
  2084. **common_args
  2085. ),
  2086. )
  2087. add_docstr(
  2088. torch.cholesky,
  2089. r"""
  2090. cholesky(input, upper=False, *, out=None) -> Tensor
  2091. Computes the Cholesky decomposition of a symmetric positive-definite
  2092. matrix :math:`A` or for batches of symmetric positive-definite matrices.
  2093. If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
  2094. the decomposition has the form:
  2095. .. math::
  2096. A = U^TU
  2097. If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
  2098. the decomposition has the form:
  2099. .. math::
  2100. A = LL^T
  2101. If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
  2102. matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
  2103. of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
  2104. tensor will be composed of lower-triangular Cholesky factors of each of the individual
  2105. matrices.
  2106. .. warning::
  2107. :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
  2108. and will be removed in a future PyTorch release.
  2109. ``L = torch.cholesky(A)`` should be replaced with
  2110. .. code:: python
  2111. L = torch.linalg.cholesky(A)
  2112. ``U = torch.cholesky(A, upper=True)`` should be replaced with
  2113. .. code:: python
  2114. U = torch.linalg.cholesky(A).mH
  2115. This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
  2116. Args:
  2117. input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
  2118. batch dimensions consisting of symmetric positive-definite matrices.
upper (bool, optional): flag that indicates whether to return an
  2120. upper or lower triangular matrix. Default: ``False``
  2121. Keyword args:
  2122. out (Tensor, optional): the output matrix
  2123. Example::
  2124. >>> a = torch.randn(3, 3)
  2125. >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
  2126. >>> l = torch.cholesky(a)
  2127. >>> a
  2128. tensor([[ 2.4112, -0.7486, 1.4551],
  2129. [-0.7486, 1.3544, 0.1294],
  2130. [ 1.4551, 0.1294, 1.6724]])
  2131. >>> l
  2132. tensor([[ 1.5528, 0.0000, 0.0000],
  2133. [-0.4821, 1.0592, 0.0000],
  2134. [ 0.9371, 0.5487, 0.7023]])
  2135. >>> l @ l.mT
  2136. tensor([[ 2.4112, -0.7486, 1.4551],
  2137. [-0.7486, 1.3544, 0.1294],
  2138. [ 1.4551, 0.1294, 1.6724]])
  2139. >>> a = torch.randn(3, 2, 2) # Example for batched input
  2140. >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
  2141. >>> l = torch.cholesky(a)
  2142. >>> z = l @ l.mT
  2143. >>> torch.dist(z, a)
  2144. tensor(2.3842e-07)
  2145. """,
  2146. )
  2147. add_docstr(
  2148. torch.cholesky_solve,
  2149. r"""
  2150. cholesky_solve(B, L, upper=False, *, out=None) -> Tensor
  2151. Computes the solution of a system of linear equations with complex Hermitian
  2152. or real symmetric positive-definite lhs given its Cholesky decomposition.
  2153. Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
  2154. and :math:`L` its Cholesky decomposition such that:
  2155. .. math::
  2156. A = LL^{\text{H}}
  2157. where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
  2158. and the transpose when :math:`L` is real-valued.
  2159. Returns the solution :math:`X` of the following linear system:
  2160. .. math::
  2161. AX = B
  2162. Supports inputs of float, double, cfloat and cdouble dtypes.
  2163. Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices
  2164. then the output has the same batch dimensions.
  2165. Args:
  2166. B (Tensor): right-hand side tensor of shape `(*, n, k)`
  2167. where :math:`*` is zero or more batch dimensions
  2168. L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
  2169. consisting of lower or upper triangular Cholesky decompositions of
  2170. symmetric or Hermitian positive-definite matrices.
  2171. upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
  2172. or upper triangular. Default: ``False``.
  2173. Keyword args:
  2174. out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
  2175. Example::
  2176. >>> A = torch.randn(3, 3)
  2177. >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
  2178. >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
  2179. >>> B = torch.randn(3, 2)
  2180. >>> torch.cholesky_solve(B, L)
  2181. tensor([[ -8.1625, 19.6097],
  2182. [ -5.8398, 14.2387],
  2183. [ -4.3771, 10.4173]])
  2184. >>> A.inverse() @ B
  2185. tensor([[ -8.1626, 19.6097],
  2186. [ -5.8398, 14.2387],
  2187. [ -4.3771, 10.4173]])
  2188. >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
  2189. >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
  2190. >>> L = torch.linalg.cholesky(A)
  2191. >>> B = torch.randn(2, 1, dtype=torch.complex64)
  2192. >>> X = torch.cholesky_solve(B, L)
  2193. >>> torch.dist(X, A.inverse() @ B)
  2194. tensor(1.6881e-5)
  2195. """,
  2196. )
  2197. add_docstr(
  2198. torch.cholesky_inverse,
  2199. r"""
  2200. cholesky_inverse(L, upper=False, *, out=None) -> Tensor
  2201. Computes the inverse of a complex Hermitian or real symmetric
  2202. positive-definite matrix given its Cholesky decomposition.
  2203. Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
  2204. and :math:`L` its Cholesky decomposition such that:
  2205. .. math::
  2206. A = LL^{\text{H}}
  2207. where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
  2208. and the transpose when :math:`L` is real-valued.
  2209. Computes the inverse matrix :math:`A^{-1}`.
  2210. Supports input of float, double, cfloat and cdouble dtypes.
  2211. Also supports batches of matrices, and if :math:`A` is a batch of matrices
  2212. then the output has the same batch dimensions.
  2213. Args:
  2214. L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
  2215. consisting of lower or upper triangular Cholesky decompositions of
  2216. symmetric or Hermitian positive-definite matrices.
  2217. upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
  2218. or upper triangular. Default: ``False``
  2219. Keyword args:
  2220. out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
  2221. Example::
  2222. >>> A = torch.randn(3, 3)
  2223. >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
  2224. >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
  2225. >>> torch.cholesky_inverse(L)
  2226. tensor([[ 1.9314, 1.2251, -0.0889],
  2227. [ 1.2251, 2.4439, 0.2122],
  2228. [-0.0889, 0.2122, 0.1412]])
  2229. >>> A.inverse()
  2230. tensor([[ 1.9314, 1.2251, -0.0889],
  2231. [ 1.2251, 2.4439, 0.2122],
  2232. [-0.0889, 0.2122, 0.1412]])
  2233. >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
  2234. >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
  2235. >>> L = torch.linalg.cholesky(A)
  2236. >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L))
  2237. tensor(5.6358e-7)
  2238. """,
  2239. )
  2240. add_docstr(
  2241. torch.clone,
  2242. r"""
  2243. clone(input, *, memory_format=torch.preserve_format) -> Tensor
  2244. Returns a copy of :attr:`input`.
  2245. .. note::
  2246. This function is differentiable, so gradients will flow back from the
  2247. result of this operation to :attr:`input`. To create a tensor without an
  2248. autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
  2249. Args:
  2250. {input}
  2251. Keyword args:
  2252. {memory_format}
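Example (a small sketch of the autograd relationship described in the note above)::
>>> a = torch.tensor([1., 2.], requires_grad=True)
>>> torch.clone(a).requires_grad
True
>>> torch.clone(a).detach().requires_grad
False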
  2253. """.format(
  2254. **common_args
  2255. ),
  2256. )
  2257. add_docstr(
  2258. torch.clamp,
  2259. r"""
  2260. clamp(input, min=None, max=None, *, out=None) -> Tensor
  2261. Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
  2262. Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
  2263. .. math::
  2264. y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
If :attr:`min` is ``None``, there is no lower bound, and if :attr:`max` is
``None``, there is no upper bound.
  2267. """
  2268. + r"""
  2269. .. note::
If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
sets all elements in :attr:`input` to the value of :attr:`max`.
  2272. Args:
  2273. {input}
  2274. min (Number or Tensor, optional): lower-bound of the range to be clamped to
  2275. max (Number or Tensor, optional): upper-bound of the range to be clamped to
  2276. Keyword args:
  2277. {out}
  2278. Example::
  2279. >>> a = torch.randn(4)
  2280. >>> a
  2281. tensor([-1.7120, 0.1734, -0.0478, -0.0922])
  2282. >>> torch.clamp(a, min=-0.5, max=0.5)
  2283. tensor([-0.5000, 0.1734, -0.0478, -0.0922])
  2284. >>> min = torch.linspace(-1, 1, steps=4)
  2285. >>> torch.clamp(a, min=min)
  2286. tensor([-1.0000, 0.1734, 0.3333, 1.0000])
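Illustrating the note above, when ``min`` exceeds ``max`` every element is set
to the value of ``max``::
>>> torch.clamp(torch.tensor([-1., 0., 1.]), min=1.0, max=0.5)
tensor([0.5000, 0.5000, 0.5000])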
  2287. """.format(
  2288. **common_args
  2289. ),
  2290. )
  2291. add_docstr(
  2292. torch.clip,
  2293. r"""
  2294. clip(input, min=None, max=None, *, out=None) -> Tensor
  2295. Alias for :func:`torch.clamp`.
  2296. """,
  2297. )
  2298. add_docstr(
  2299. torch.column_stack,
  2300. r"""
  2301. column_stack(tensors, *, out=None) -> Tensor
  2302. Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
  2303. Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
  2304. in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
  2305. Args:
  2306. tensors (sequence of Tensors): sequence of tensors to concatenate
  2307. Keyword args:
  2308. {out}
  2309. Example::
  2310. >>> a = torch.tensor([1, 2, 3])
  2311. >>> b = torch.tensor([4, 5, 6])
  2312. >>> torch.column_stack((a, b))
  2313. tensor([[1, 4],
  2314. [2, 5],
  2315. [3, 6]])
  2316. >>> a = torch.arange(5)
  2317. >>> b = torch.arange(10).reshape(5, 2)
  2318. >>> torch.column_stack((a, b, b))
  2319. tensor([[0, 0, 1, 0, 1],
  2320. [1, 2, 3, 2, 3],
  2321. [2, 4, 5, 4, 5],
  2322. [3, 6, 7, 6, 7],
  2323. [4, 8, 9, 8, 9]])
  2324. """.format(
  2325. **common_args
  2326. ),
  2327. )
  2328. add_docstr(
  2329. torch.complex,
  2330. r"""
  2331. complex(real, imag, *, out=None) -> Tensor
  2332. Constructs a complex tensor with its real part equal to :attr:`real` and its
  2333. imaginary part equal to :attr:`imag`.
  2334. Args:
  2335. real (Tensor): The real part of the complex tensor. Must be half, float or double.
  2336. imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
  2337. as :attr:`real`.
  2338. Keyword args:
  2339. out (Tensor): If the inputs are ``torch.float32``, must be
  2340. ``torch.complex64``. If the inputs are ``torch.float64``, must be
  2341. ``torch.complex128``.
  2342. Example::
  2343. >>> real = torch.tensor([1, 2], dtype=torch.float32)
  2344. >>> imag = torch.tensor([3, 4], dtype=torch.float32)
  2345. >>> z = torch.complex(real, imag)
  2346. >>> z
  2347. tensor([(1.+3.j), (2.+4.j)])
  2348. >>> z.dtype
  2349. torch.complex64
  2350. """,
  2351. )
  2352. add_docstr(
  2353. torch.polar,
  2354. r"""
  2355. polar(abs, angle, *, out=None) -> Tensor
  2356. Constructs a complex tensor whose elements are Cartesian coordinates
  2357. corresponding to the polar coordinates with absolute value :attr:`abs` and angle
  2358. :attr:`angle`.
  2359. .. math::
  2360. \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
  2361. .. note::
  2362. `torch.polar` is similar to
  2363. `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
  2364. and does not compute the polar decomposition
  2365. of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
  2366. The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
  2367. infinite.
  2368. """
  2369. + r"""
  2370. Args:
  2371. abs (Tensor): The absolute value the complex tensor. Must be float or double.
  2372. angle (Tensor): The angle of the complex tensor. Must be same dtype as
  2373. :attr:`abs`.
  2374. Keyword args:
  2375. out (Tensor): If the inputs are ``torch.float32``, must be
  2376. ``torch.complex64``. If the inputs are ``torch.float64``, must be
  2377. ``torch.complex128``.
  2378. Example::
  2379. >>> import numpy as np
  2380. >>> abs = torch.tensor([1, 2], dtype=torch.float64)
  2381. >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
  2382. >>> z = torch.polar(abs, angle)
  2383. >>> z
  2384. tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
  2385. """,
  2386. )
  2387. add_docstr(
  2388. torch.conj_physical,
  2389. r"""
  2390. conj_physical(input, *, out=None) -> Tensor
  2391. Computes the element-wise conjugate of the given :attr:`input` tensor.
  2392. If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
  2393. .. note::
  2394. This performs the conjugate operation regardless of the fact conjugate bit is set or not.
  2395. .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
  2396. non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
  2397. when :attr:`input` is of non-complex dtype to be compatible with this change.
  2398. .. math::
  2399. \text{out}_{i} = conj(\text{input}_{i})
  2400. """
  2401. + r"""
  2402. Args:
  2403. {input}
  2404. Keyword args:
  2405. {out}
  2406. Example::
  2407. >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
  2408. tensor([-1 - 1j, -2 - 2j, 3 + 3j])
  2409. """.format(
  2410. **common_args
  2411. ),
  2412. )
  2413. add_docstr(
  2414. torch.conj,
  2415. r"""
  2416. conj(input) -> Tensor
  2417. Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
  2418. this function just returns :attr:`input`.
  2419. .. note::
  2420. :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
  2421. at any time using :func:`torch.resolve_conj`.
  2422. .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
  2424. when :attr:`input` is of non-complex dtype to be compatible with this change.
  2425. Args:
  2426. {input}
  2427. Example::
  2428. >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
  2429. >>> x.is_conj()
  2430. False
  2431. >>> y = torch.conj(x)
  2432. >>> y.is_conj()
  2433. True
  2434. """.format(
  2435. **common_args
  2436. ),
  2437. )
  2438. add_docstr(
  2439. torch.resolve_conj,
  2440. r"""
  2441. resolve_conj(input) -> Tensor
  2442. Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
  2443. else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
  2444. Args:
  2445. {input}
  2446. Example::
  2447. >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
  2448. >>> y = x.conj()
  2449. >>> y.is_conj()
  2450. True
  2451. >>> z = y.resolve_conj()
  2452. >>> z
  2453. tensor([-1 - 1j, -2 - 2j, 3 + 3j])
  2454. >>> z.is_conj()
  2455. False
  2456. """.format(
  2457. **common_args
  2458. ),
  2459. )
  2460. add_docstr(
  2461. torch.resolve_neg,
  2462. r"""
  2463. resolve_neg(input) -> Tensor
  2464. Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
  2465. else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
  2466. Args:
  2467. {input}
  2468. Example::
  2469. >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
  2470. >>> y = x.conj()
  2471. >>> z = y.imag
  2472. >>> z.is_neg()
  2473. True
  2474. >>> out = z.resolve_neg()
  2475. >>> out
  2476. tensor([-1., -2., 3.])
  2477. >>> out.is_neg()
  2478. False
  2479. """.format(
  2480. **common_args
  2481. ),
  2482. )
  2483. add_docstr(
  2484. torch.copysign,
  2485. r"""
  2486. copysign(input, other, *, out=None) -> Tensor
  2487. Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
  2488. .. math::
  2489. \text{out}_{i} = \begin{cases}
  2490. -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
  2491. |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
  2492. \end{cases}
  2493. """
  2494. + r"""
  2495. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
  2496. and integer and float inputs.
  2497. Args:
  2498. input (Tensor): magnitudes.
  2499. other (Tensor or Number): contains value(s) whose signbit(s) are
  2500. applied to the magnitudes in :attr:`input`.
  2501. Keyword args:
  2502. {out}
  2503. Example::
  2504. >>> a = torch.randn(5)
  2505. >>> a
  2506. tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
  2507. >>> torch.copysign(a, 1)
  2508. tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
  2509. >>> a = torch.randn(4, 4)
  2510. >>> a
  2511. tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
  2512. [-0.0059, -0.2600, -0.4475, -1.3948],
  2513. [ 0.3667, -0.9567, -2.5757, -0.1751],
  2514. [ 0.2046, -0.0742, 0.2998, -0.1054]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
  2517. >>> torch.copysign(a, b)
  2518. tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
  2519. [ 0.0059, 0.2600, 0.4475, -1.3948],
  2520. [ 0.3667, 0.9567, 2.5757, -0.1751],
  2521. [ 0.2046, 0.0742, 0.2998, -0.1054]])
  2522. >>> a = torch.tensor([1.])
  2523. >>> b = torch.tensor([-0.])
  2524. >>> torch.copysign(a, b)
  2525. tensor([-1.])
  2526. .. note::
  2527. copysign handles signed zeros. If the other argument has a negative zero (-0),
  2528. the corresponding output value will be negative.
  2529. """.format(
  2530. **common_args
  2531. ),
  2532. )
  2533. add_docstr(
  2534. torch.cos,
  2535. r"""
  2536. cos(input, *, out=None) -> Tensor
  2537. Returns a new tensor with the cosine of the elements of :attr:`input`.
  2538. .. math::
  2539. \text{out}_{i} = \cos(\text{input}_{i})
  2540. """
  2541. + r"""
  2542. Args:
  2543. {input}
  2544. Keyword args:
  2545. {out}
  2546. Example::
  2547. >>> a = torch.randn(4)
  2548. >>> a
  2549. tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
  2550. >>> torch.cos(a)
  2551. tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
  2552. """.format(
  2553. **common_args
  2554. ),
  2555. )
  2556. add_docstr(
  2557. torch.cosh,
  2558. r"""
  2559. cosh(input, *, out=None) -> Tensor
  2560. Returns a new tensor with the hyperbolic cosine of the elements of
  2561. :attr:`input`.
  2562. .. math::
  2563. \text{out}_{i} = \cosh(\text{input}_{i})
  2564. """
  2565. + r"""
  2566. Args:
  2567. {input}
  2568. Keyword args:
  2569. {out}
  2570. Example::
  2571. >>> a = torch.randn(4)
  2572. >>> a
  2573. tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
  2574. >>> torch.cosh(a)
  2575. tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
  2576. .. note::
  2577. When :attr:`input` is on the CPU, the implementation of torch.cosh may use
  2578. the Sleef library, which rounds very large results to infinity or negative
  2579. infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
  2580. """.format(
  2581. **common_args
  2582. ),
  2583. )
  2584. add_docstr(
  2585. torch.cross,
  2586. r"""
  2587. cross(input, other, dim=None, *, out=None) -> Tensor
  2588. Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
  2589. and :attr:`other`.
  2590. Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
  2591. of vectors, for which it computes the product along the dimension :attr:`dim`.
  2592. In this case, the output has the same batch dimensions as the inputs.
  2593. .. warning::
  2594. If :attr:`dim` is not given, it defaults to the first dimension found
with size 3. Note that this might be unexpected.
  2596. This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross`
  2597. in a future release.
  2598. .. seealso::
  2599. :func:`torch.linalg.cross` which has dim=-1 as default.
  2600. Args:
  2601. {input}
  2602. other (Tensor): the second input tensor
  2603. dim (int, optional): the dimension to take the cross-product in.
  2604. Keyword args:
  2605. {out}
  2606. Example::
  2607. >>> a = torch.randn(4, 3)
  2608. >>> a
  2609. tensor([[-0.3956, 1.1455, 1.6895],
  2610. [-0.5849, 1.3672, 0.3599],
  2611. [-1.1626, 0.7180, -0.0521],
  2612. [-0.1339, 0.9902, -2.0225]])
  2613. >>> b = torch.randn(4, 3)
  2614. >>> b
  2615. tensor([[-0.0257, -1.4725, -1.2251],
  2616. [-1.1479, -0.7005, -1.9757],
  2617. [-1.3904, 0.3726, -1.1836],
  2618. [-0.9688, -0.7153, 0.2159]])
  2619. >>> torch.cross(a, b, dim=1)
  2620. tensor([[ 1.0844, -0.5281, 0.6120],
  2621. [-2.4490, -1.5687, 1.9792],
  2622. [-0.8304, -1.3037, 0.5650],
  2623. [-1.2329, 1.9883, 1.0551]])
  2624. >>> torch.cross(a, b)
  2625. tensor([[ 1.0844, -0.5281, 0.6120],
  2626. [-2.4490, -1.5687, 1.9792],
  2627. [-0.8304, -1.3037, 0.5650],
  2628. [-1.2329, 1.9883, 1.0551]])
  2629. """.format(
  2630. **common_args
  2631. ),
  2632. )
  2633. add_docstr(
  2634. torch.logcumsumexp,
  2635. r"""
  2636. logcumsumexp(input, dim, *, out=None) -> Tensor
  2637. Returns the logarithm of the cumulative summation of the exponentiation of
  2638. elements of :attr:`input` in the dimension :attr:`dim`.
  2639. For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
  2640. .. math::
  2641. \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})
  2642. Args:
  2643. {input}
  2644. dim (int): the dimension to do the operation over
  2645. Keyword args:
  2646. {out}
  2647. Example::
  2648. >>> a = torch.randn(10)
  2649. >>> torch.logcumsumexp(a, dim=0)
  2650. tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
  2652. """.format(
  2653. **reduceops_common_args
  2654. ),
  2655. )
  2656. add_docstr(
  2657. torch.cummax,
  2658. r"""
  2659. cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
  2660. Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
  2662. location of each maximum value found in the dimension :attr:`dim`.
  2663. .. math::
  2664. y_i = max(x_1, x_2, x_3, \dots, x_i)
  2665. Args:
  2666. {input}
  2667. dim (int): the dimension to do the operation over
  2668. Keyword args:
  2669. out (tuple, optional): the result tuple of two output tensors (values, indices)
  2670. Example::
  2671. >>> a = torch.randn(10)
  2672. >>> a
  2673. tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
  2674. 1.9946, -0.8209])
  2675. >>> torch.cummax(a, dim=0)
  2676. torch.return_types.cummax(
  2677. values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
  2678. 1.9946, 1.9946]),
  2679. indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
  2680. """.format(
  2681. **reduceops_common_args
  2682. ),
  2683. )
  2684. add_docstr(
  2685. torch.cummin,
  2686. r"""
  2687. cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
  2688. Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
  2691. .. math::
  2692. y_i = min(x_1, x_2, x_3, \dots, x_i)
  2693. Args:
  2694. {input}
  2695. dim (int): the dimension to do the operation over
  2696. Keyword args:
  2697. out (tuple, optional): the result tuple of two output tensors (values, indices)
  2698. Example::
  2699. >>> a = torch.randn(10)
  2700. >>> a
  2701. tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
  2702. 0.9165, 1.6684])
  2703. >>> torch.cummin(a, dim=0)
  2704. torch.return_types.cummin(
  2705. values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
  2706. -1.3298, -1.3298]),
  2707. indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
  2708. """.format(
  2709. **reduceops_common_args
  2710. ),
  2711. )
  2712. add_docstr(
  2713. torch.cumprod,
  2714. r"""
  2715. cumprod(input, dim, *, dtype=None, out=None) -> Tensor
  2716. Returns the cumulative product of elements of :attr:`input` in the dimension
  2717. :attr:`dim`.
  2718. For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements given by
  2720. .. math::
  2721. y_i = x_1 \times x_2\times x_3\times \dots \times x_i
  2722. Args:
  2723. {input}
  2724. dim (int): the dimension to do the operation over
  2725. Keyword args:
  2726. {dtype}
  2727. {out}
  2728. Example::
  2729. >>> a = torch.randn(10)
  2730. >>> a
  2731. tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
  2732. -0.2129, -0.4206, 0.1968])
  2733. >>> torch.cumprod(a, dim=0)
  2734. tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
  2735. 0.0014, -0.0006, -0.0001])
  2736. >>> a[5] = 0.0
  2737. >>> torch.cumprod(a, dim=0)
  2738. tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
  2739. 0.0000, -0.0000, -0.0000])
  2740. """.format(
  2741. **reduceops_common_args
  2742. ),
  2743. )
  2744. add_docstr(
  2745. torch.cumsum,
  2746. r"""
  2747. cumsum(input, dim, *, dtype=None, out=None) -> Tensor
  2748. Returns the cumulative sum of elements of :attr:`input` in the dimension
  2749. :attr:`dim`.
  2750. For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements given by
  2752. .. math::
  2753. y_i = x_1 + x_2 + x_3 + \dots + x_i
  2754. Args:
  2755. {input}
  2756. dim (int): the dimension to do the operation over
  2757. Keyword args:
  2758. {dtype}
  2759. {out}
  2760. Example::
  2761. >>> a = torch.randint(1, 20, (10,))
  2762. >>> a
  2763. tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
  2764. >>> torch.cumsum(a, dim=0)
  2765. tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
  2766. """.format(
  2767. **reduceops_common_args
  2768. ),
  2769. )

add_docstr(
    torch.count_nonzero,
    r"""
count_nonzero(input, dim=None) -> Tensor
Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.

Args:
    {input}
    dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.

Example::

    >>> x = torch.zeros(3, 3)
    >>> x[torch.randn(3, 3) > 0.5] = 1
    >>> x
    tensor([[0., 1., 1.],
            [0., 0., 0.],
            [0., 0., 1.]])
    >>> torch.count_nonzero(x)
    tensor(3)
    >>> torch.count_nonzero(x, dim=0)
    tensor([0, 1, 2])
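    >>> # Illustrative sketch: a tuple of dims reduces over all of them at once
    >>> torch.count_nonzero(x, dim=(0, 1))
    tensor(3)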
  2790. """.format(
  2791. **reduceops_common_args
  2792. ),
  2793. )

add_docstr(
    torch.dequantize,
    r"""
dequantize(tensor) -> Tensor
Returns an fp32 Tensor by dequantizing a quantized Tensor.

Args:
    tensor (Tensor): A quantized Tensor

.. function:: dequantize(tensors) -> sequence of Tensors
   :noindex:

Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors.

Args:
    tensors (sequence of Tensors): A list of quantized Tensors
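
Example::

    >>> # Illustrative sketch: round-trip through a per-tensor quantized tensor
    >>> x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
    >>> q = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
    >>> torch.dequantize(q)
    tensor([-1.,  0.,  1.,  2.])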
  2806. """,
  2807. )

add_docstr(
    torch.diag,
    r"""
diag(input, diagonal=0, *, out=None) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
  with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
  the diagonal elements of :attr:`input`.

The argument :attr:`diagonal` controls which diagonal to consider:

- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.

Args:
    {input}
    diagonal (int, optional): the diagonal to consider

Keyword args:
    {out}

.. seealso::

    :func:`torch.diagonal` always returns the diagonal of its input.

    :func:`torch.diagflat` always constructs a tensor with diagonal elements
    specified by the input.

Examples:

Get the square matrix where the input vector is the diagonal::

    >>> a = torch.randn(3)
    >>> a
    tensor([ 0.5950, -0.0872,  2.3298])
    >>> torch.diag(a)
    tensor([[ 0.5950,  0.0000,  0.0000],
            [ 0.0000, -0.0872,  0.0000],
            [ 0.0000,  0.0000,  2.3298]])
    >>> torch.diag(a, 1)
    tensor([[ 0.0000,  0.5950,  0.0000,  0.0000],
            [ 0.0000,  0.0000, -0.0872,  0.0000],
            [ 0.0000,  0.0000,  0.0000,  2.3298],
            [ 0.0000,  0.0000,  0.0000,  0.0000]])

Get the k-th diagonal of a given matrix::

    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[-0.4264,  0.0255, -0.1064],
            [ 0.8795, -0.2429,  0.1374],
            [ 0.1029, -0.6482, -1.6300]])
    >>> torch.diag(a, 0)
    tensor([-0.4264, -0.2429, -1.6300])
    >>> torch.diag(a, 1)
    tensor([ 0.0255,  0.1374])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.diag_embed,
    r"""
diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
Creates a tensor whose diagonals of certain 2D planes (specified by
:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
To facilitate creating batched diagonal matrices, the 2D planes formed by
the last two dimensions of the returned tensor are chosen by default.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

The size of the new matrix is calculated so that the specified diagonal
has the length of the last dimension of :attr:`input`.
Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
and :attr:`dim2` matters. Exchanging them is equivalent to changing the
sign of :attr:`offset`.

Applying :meth:`torch.diagonal` to the output of this function with
the same arguments yields a matrix identical to input. However,
:meth:`torch.diagonal` has different default dimensions, so those
need to be explicitly specified.

Args:
    {input} Must be at least 1-dimensional.
    offset (int, optional): which diagonal to consider. Default: 0
        (main diagonal).
    dim1 (int, optional): first dimension with respect to which to
        take diagonal. Default: -2.
    dim2 (int, optional): second dimension with respect to which to
        take diagonal. Default: -1.

Example::

    >>> a = torch.randn(2, 3)
    >>> torch.diag_embed(a)
    tensor([[[ 1.5410,  0.0000,  0.0000],
             [ 0.0000, -0.2934,  0.0000],
             [ 0.0000,  0.0000, -2.1788]],

            [[ 0.5684,  0.0000,  0.0000],
             [ 0.0000, -1.0845,  0.0000],
             [ 0.0000,  0.0000, -1.3986]]])

    >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
    tensor([[[ 0.0000,  1.5410,  0.0000,  0.0000],
             [ 0.0000,  0.5684,  0.0000,  0.0000]],

            [[ 0.0000,  0.0000, -0.2934,  0.0000],
             [ 0.0000,  0.0000, -1.0845,  0.0000]],

            [[ 0.0000,  0.0000,  0.0000, -2.1788],
             [ 0.0000,  0.0000,  0.0000, -1.3986]],

            [[ 0.0000,  0.0000,  0.0000,  0.0000],
             [ 0.0000,  0.0000,  0.0000,  0.0000]]])
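
    >>> # Illustrative sketch of the round trip described above: with matching
    >>> # arguments, torch.diagonal recovers the input
    >>> x = torch.randn(2, 3)
    >>> torch.allclose(torch.diagonal(torch.diag_embed(x), dim1=-2, dim2=-1), x)
    True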
  2904. """.format(
  2905. **common_args
  2906. ),
  2907. )

add_docstr(
    torch.diagflat,
    r"""
diagflat(input, offset=0) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
  with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a tensor with more than one dimension, then returns a
  2-D tensor with diagonal elements equal to a flattened :attr:`input`.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

Args:
    {input}
    offset (int, optional): the diagonal to consider. Default: 0 (main
        diagonal).

Examples::

    >>> a = torch.randn(3)
    >>> a
    tensor([-0.2956, -0.9068,  0.1695])
    >>> torch.diagflat(a)
    tensor([[-0.2956,  0.0000,  0.0000],
            [ 0.0000, -0.9068,  0.0000],
            [ 0.0000,  0.0000,  0.1695]])
    >>> torch.diagflat(a, 1)
    tensor([[ 0.0000, -0.2956,  0.0000,  0.0000],
            [ 0.0000,  0.0000, -0.9068,  0.0000],
            [ 0.0000,  0.0000,  0.0000,  0.1695],
            [ 0.0000,  0.0000,  0.0000,  0.0000]])

    >>> a = torch.randn(2, 2)
    >>> a
    tensor([[ 0.2094, -0.3018],
            [-0.1516,  1.9342]])
    >>> torch.diagflat(a)
    tensor([[ 0.2094,  0.0000,  0.0000,  0.0000],
            [ 0.0000, -0.3018,  0.0000,  0.0000],
            [ 0.0000,  0.0000, -0.1516,  0.0000],
            [ 0.0000,  0.0000,  0.0000,  1.9342]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.diagonal,
    r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.

Args:
    {input} Must be at least 2-dimensional.
    offset (int, optional): which diagonal to consider. Default: 0
        (main diagonal).
    dim1 (int, optional): first dimension with respect to which to
        take diagonal. Default: 0.
    dim2 (int, optional): second dimension with respect to which to
        take diagonal. Default: 1.

.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.

Examples::

    >>> a = torch.randn(3, 3)
    >>> a
    tensor([[-1.0854,  1.1431, -0.1752],
            [ 0.8536, -0.0905,  0.0360],
            [ 0.6927, -0.3735, -0.4945]])
    >>> torch.diagonal(a, 0)
    tensor([-1.0854, -0.0905, -0.4945])
    >>> torch.diagonal(a, 1)
    tensor([ 1.1431,  0.0360])
    >>> x = torch.randn(2, 5, 4, 2)
    >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
    tensor([[[-1.2631,  0.3755, -1.5977, -1.8172],
             [-1.1065,  1.0401, -0.2235, -0.7938]],

            [[-1.7325, -0.3081,  0.6166,  0.2335],
             [ 1.0500,  0.7336, -0.3836, -1.1015]]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.diagonal_scatter,
    r"""
diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
and :attr:`dim2`.

This function returns a tensor with fresh storage; it does not
return a view.

The argument :attr:`offset` controls which diagonal to consider:

- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.

Args:
    {input} Must be at least 2-dimensional.
    src (Tensor): the tensor to embed into :attr:`input`.
    offset (int, optional): which diagonal to consider. Default: 0
        (main diagonal).
    dim1 (int, optional): first dimension with respect to which to
        take diagonal. Default: 0.
    dim2 (int, optional): second dimension with respect to which to
        take diagonal. Default: 1.

.. note::

    :attr:`src` must be of the proper size in order to be embedded
    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.diagonal(input, offset, dim1, dim2)``.

Examples::

    >>> a = torch.zeros(3, 3)
    >>> a
    tensor([[0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> torch.diagonal_scatter(a, torch.ones(3), 0)
    tensor([[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]])
    >>> torch.diagonal_scatter(a, torch.ones(2), 1)
    tensor([[0., 1., 0.],
            [0., 0., 1.],
            [0., 0., 0.]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.as_strided_scatter,
    r"""
as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the elements corresponding to the result of calling
``input.as_strided(size, stride, storage_offset)``.

This function returns a tensor with fresh storage; it does not
return a view.

Args:
    {input}
    src (Tensor): the tensor to embed into :attr:`input`
    size (tuple of ints): the shape of the output tensor
    stride (tuple of ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor

.. note::

    :attr:`src` must be of the proper size in order to be embedded
    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.as_strided(input, size, stride, storage_offset)``.

Example::

    >>> a = torch.arange(4).reshape(2, 2) + 1
    >>> a
    tensor([[1, 2],
            [3, 4]])
    >>> b = torch.zeros(3, 3)
    >>> b
    tensor([[0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
    tensor([[1., 3., 2.],
            [4., 0., 0.],
            [0., 0., 0.]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.diff,
    r"""
diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
Computes the n-th forward difference along the given dimension.

The first-order differences are given by ``out[i] = input[i + 1] - input[i]``. Higher-order
differences are calculated by using :func:`torch.diff` recursively.

Args:
    input (Tensor): the tensor to compute the differences on
    n (int, optional): the number of times to recursively compute the difference
    dim (int, optional): the dimension to compute the difference along.
        Default is the last dimension.
    prepend, append (Tensor, optional): values to prepend or append to
        :attr:`input` along :attr:`dim` before computing the difference.
        Their dimensions must be equivalent to those of input, and their shapes
        must match input's shape except on :attr:`dim`.

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 3, 2])
    >>> torch.diff(a)
    tensor([ 2, -1])
    >>> b = torch.tensor([4, 5])
    >>> torch.diff(a, append=b)
    tensor([ 2, -1,  2,  1])
    >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
    >>> torch.diff(c, dim=0)
    tensor([[2, 2, 2]])
    >>> torch.diff(c, dim=1)
    tensor([[1, 1],
            [1, 1]])
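    >>> # Illustrative sketch: n=2 applies the first-order difference twice
    >>> torch.diff(a, n=2)
    tensor([-3])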
  3105. """.format(
  3106. **common_args
  3107. ),
  3108. )

add_docstr(
    torch.digamma,
    r"""
digamma(input, *, out=None) -> Tensor
Alias for :func:`torch.special.digamma`.
""",
)

add_docstr(
    torch.dist,
    r"""
dist(input, other, p=2) -> Tensor
Returns the p-norm of (:attr:`input` - :attr:`other`).

The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    {input}
    other (Tensor): the right-hand-side input tensor
    p (float, optional): the norm to be computed

Example::

    >>> x = torch.randn(4)
    >>> x
    tensor([-1.5393, -0.8675,  0.5916,  1.6321])
    >>> y = torch.randn(4)
    >>> y
    tensor([ 0.0967, -1.0511,  0.6295,  0.8360])
    >>> torch.dist(x, y, 3.5)
    tensor(1.6727)
    >>> torch.dist(x, y, 3)
    tensor(1.6973)
    >>> torch.dist(x, y, 0)
    tensor(4.)
    >>> torch.dist(x, y, 1)
    tensor(2.6537)
""".format(
        **common_args
    ),
)

add_docstr(
    torch.div,
    r"""
div(input, other, *, rounding_mode=None, out=None) -> Tensor
Divides each element of :attr:`input` by the corresponding element of
:attr:`other`.

.. math::
    \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}

.. note::

    By default, this performs a "true" division like Python 3.
    See the :attr:`rounding_mode` argument for floor division.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Always promotes integer types to the default scalar type.

Args:
    input (Tensor): the dividend
    other (Tensor or Number): the divisor

Keyword args:
    rounding_mode (str, optional): Type of rounding applied to the result:

        * None - default behavior. Performs no rounding and, if both :attr:`input` and
          :attr:`other` are integer types, promotes the inputs to the default scalar type.
          Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
        * ``"trunc"`` - rounds the results of the division towards zero.
          Equivalent to C-style integer division.
        * ``"floor"`` - rounds the results of the division down.
          Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.

    {out}

Examples::

    >>> x = torch.tensor([ 0.3810,  1.2774, -0.2972, -0.3719,  0.4637])
    >>> torch.div(x, 0.5)
    tensor([ 0.7620,  2.5548, -0.5944, -0.7438,  0.9274])

    >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
    ...                   [ 0.1815, -1.0111,  0.9805, -1.5923],
    ...                   [ 0.1062,  1.4581,  0.7759, -1.2344],
    ...                   [-0.1830, -0.0313,  1.1908, -1.4757]])
    >>> b = torch.tensor([ 0.8032,  0.2930, -0.8113, -0.2308])
    >>> torch.div(a, b)
    tensor([[-0.4620, -6.6051,  0.5676,  1.2639],
            [ 0.2260, -3.4509, -1.2086,  6.8990],
            [ 0.1322,  4.9764, -0.9564,  5.3484],
            [-0.2278, -0.1068, -1.4678,  6.3938]])

    >>> torch.div(a, b, rounding_mode='trunc')
    tensor([[-0., -6.,  0.,  1.],
            [ 0., -3., -1.,  6.],
            [ 0.,  4., -0.,  5.],
            [-0., -0., -1.,  6.]])

    >>> torch.div(a, b, rounding_mode='floor')
    tensor([[-1., -7.,  0.,  1.],
            [ 0., -4., -2.,  6.],
            [ 0.,  4., -1.,  5.],
            [-1., -1., -2.,  6.]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.divide,
    r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
""",
)

add_docstr(
    torch.dot,
    r"""
dot(input, tensor, *, out=None) -> Tensor
Computes the dot product of two 1D tensors.

.. note::

    Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
    of two 1D tensors with the same number of elements.

Args:
    input (Tensor): first tensor in the dot product, must be 1D.
    tensor (Tensor): second tensor in the dot product, must be 1D.

Keyword args:
    {out}

Example::

    >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
    tensor(7)
    >>> t1, t2 = torch.tensor([0, 1]), torch.tensor([2, 3])
    >>> torch.dot(t1, t2)
    tensor(3)
""".format(
        **common_args
    ),
)

add_docstr(
    torch.vdot,
    r"""
vdot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D tensors.
In symbols, this function computes

.. math::
    \sum_{i=1}^n \overline{x_i}y_i,

where :math:`\overline{x_i}` denotes the conjugate for complex
vectors, and it is the identity for real vectors.

.. note::

    Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
    of two 1D tensors with the same number of elements.

.. seealso::

    :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.

Args:
    input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
    other (Tensor): second tensor in the dot product, must be 1D.

Keyword args:
"""
    + rf"""
.. note:: {common_args["out"]}
"""
    + r"""

Example::

    >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
    tensor(7)
    >>> a = torch.tensor((1 + 2j, 3 - 1j))
    >>> b = torch.tensor((2 + 1j, 4 - 0j))
    >>> torch.vdot(a, b)
    tensor([16.+1.j])
    >>> torch.vdot(b, a)
    tensor([16.-1.j])
""",
)

add_docstr(
    torch.eq,
    r"""
eq(input, other, *, out=None) -> Tensor
Computes element-wise equality.

The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere

Example::

    >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[ True, False],
            [False,  True]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.equal,
    r"""
equal(input, other) -> bool
``True`` if two tensors have the same size and elements, ``False`` otherwise.

Example::

    >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
    True
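    >>> # Illustrative sketch: tensors of different sizes are never equal
    >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2, 3]))
    False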
  3296. """,
  3297. )

add_docstr(
    torch.erf,
    r"""
erf(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erf`.
""",
)

add_docstr(
    torch.erfc,
    r"""
erfc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfc`.
""",
)

add_docstr(
    torch.erfinv,
    r"""
erfinv(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfinv`.
""",
)

add_docstr(
    torch.exp,
    r"""
exp(input, *, out=None) -> Tensor
Returns a new tensor with the exponential of the elements
of the input tensor :attr:`input`.

.. math::
    y_{i} = e^{x_{i}}
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.exp(torch.tensor([0, math.log(2.)]))
    tensor([ 1.,  2.])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.exp2,
    r"""
exp2(input, *, out=None) -> Tensor
Alias for :func:`torch.special.exp2`.
""",
)

add_docstr(
    torch.expm1,
    r"""
expm1(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expm1`.
""",
)

add_docstr(
    torch.eye,
    r"""
eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.

Args:
    n (int): the number of rows
    m (int, optional): the number of columns with default being :attr:`n`

Keyword arguments:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Returns:
    Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere

Example::

    >>> torch.eye(3)
    tensor([[ 1.,  0.,  0.],
            [ 0.,  1.,  0.],
            [ 0.,  0.,  1.]])
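    >>> # Illustrative sketch: passing m yields a rectangular matrix
    >>> torch.eye(3, 2)
    tensor([[ 1.,  0.],
            [ 0.,  1.],
            [ 0.,  0.]])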
  3375. """.format(
  3376. **factory_common_args
  3377. ),
  3378. )

add_docstr(
    torch.floor,
    r"""
floor(input, *, out=None) -> Tensor
Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.

For integer inputs, follows the array API convention of returning a
copy of the input tensor.

.. math::
    \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([-0.8166,  1.5308, -0.2530, -0.2091])
    >>> torch.floor(a)
    tensor([-1.,  1., -1., -1.])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.floor_divide,
    r"""
floor_divide(input, other, *, out=None) -> Tensor

.. note::

    Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
    truncation division. To restore the previous behavior use
    :func:`torch.div` with ``rounding_mode='trunc'``.

Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
the result.

.. math::
    \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right)
"""
    + r"""
Supports broadcasting to a common shape, type promotion, and integer and float inputs.

Args:
    input (Tensor or Number): the dividend
    other (Tensor or Number): the divisor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([4.0, 3.0])
    >>> b = torch.tensor([2.0, 2.0])
    >>> torch.floor_divide(a, b)
    tensor([2.0, 1.0])
    >>> torch.floor_divide(a, 1.4)
    tensor([2.0, 2.0])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.fmod,
    r"""
fmod(input, other, *, out=None) -> Tensor
Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
The result has the same sign as the dividend :attr:`input` and its absolute value
is less than that of :attr:`other`.

This function may be defined in terms of :func:`torch.div` as

.. code:: python

    torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.

.. note::

    When the divisor is zero, returns ``NaN`` for floating point dtypes
    on both CPU and GPU; raises ``RuntimeError`` for integer division by
    zero on CPU; integer division by zero on GPU may return any value.

.. note::

    Complex inputs are not supported. In some cases, it is not mathematically
    possible to satisfy the definition of a modulo operation with complex numbers.

.. seealso::

    :func:`torch.remainder` which implements Python's modulus operator.
    That function is defined in terms of division that rounds down.

Args:
    input (Tensor): the dividend
    other (Tensor or Scalar): the divisor

Keyword args:
    {out}

Example::

    >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
    tensor([-1., -0., -1.,  1.,  0.,  1.])
    >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
    tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.frac,
    r"""
frac(input, *, out=None) -> Tensor
Computes the fractional portion of each element in :attr:`input`.

.. math::
    \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})

Example::

    >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
    tensor([ 0.0000,  0.5000, -0.2000])
""",
)

add_docstr(
    torch.frexp,
    r"""
frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
Decomposes :attr:`input` into mantissa and exponent tensors
such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.

The range of mantissa is the open interval (-1, 1).

Supports float inputs.

Args:
    input (Tensor): the input tensor

Keyword args:
    out (tuple, optional): the output tensors

Example::

    >>> x = torch.arange(9.)
    >>> mantissa, exponent = torch.frexp(x)
    >>> mantissa
    tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
    >>> exponent
    tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
    >>> torch.ldexp(mantissa, exponent)
    tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
""",
)

add_docstr(
    torch.from_numpy,
    r"""
from_numpy(ndarray) -> Tensor
Creates a :class:`Tensor` from a :class:`numpy.ndarray`.

The returned tensor and :attr:`ndarray` share the same memory. Modifications to
the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
tensor is not resizable.

It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
and ``bool``.

.. warning::

    Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.

Example::

    >>> a = numpy.array([1, 2, 3])
    >>> t = torch.from_numpy(a)
    >>> t
    tensor([ 1,  2,  3])
    >>> t[0] = -1
    >>> a
    array([-1,  2,  3])
""",
)

add_docstr(
    torch.frombuffer,
    r"""
frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
Creates a 1-dimensional :class:`Tensor` from an object that implements
the Python buffer protocol.

Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
elements.

Note that either of the following must be true:

1. :attr:`count` is a positive non-zero number, and the total number of bytes
   in the buffer is more than :attr:`offset` plus :attr:`count` times the size
   (in bytes) of :attr:`dtype`.

2. :attr:`count` is negative, and the length (number of bytes) of the buffer
   subtracted by the :attr:`offset` is a multiple of the size (in bytes) of
   :attr:`dtype`.

The returned tensor and buffer share the same memory. Modifications to
the tensor will be reflected in the buffer and vice versa. The returned
tensor is not resizable.

.. note::

    This function increments the reference count for the object that
    owns the shared memory. Therefore, such memory will not be deallocated
    before the returned tensor goes out of scope.

.. warning::

    This function's behavior is undefined when passed an object implementing
    the buffer protocol whose data is not on the CPU. Doing so is likely to
    cause a segmentation fault.

.. warning::

    This function does not try to infer the :attr:`dtype` (hence, it is not
    optional). Passing a different :attr:`dtype` than its source may result
    in unexpected behavior.

Args:
    buffer (object): a Python object that exposes the buffer interface.

Keyword args:
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
    count (int, optional): the number of desired elements to be read.
        If negative, all the elements (until the end of the buffer) will be
        read. Default: -1.
    offset (int, optional): the number of bytes to skip at the start of
        the buffer. Default: 0.
    {requires_grad}

Example::

    >>> import array
    >>> a = array.array('i', [1, 2, 3])
    >>> t = torch.frombuffer(a, dtype=torch.int32)
    >>> t
    tensor([ 1,  2,  3])
    >>> t[0] = -1
    >>> a
    array([-1,  2,  3])

    >>> # Interprets the signed char bytes as 32-bit integers.
    >>> # Each 4 signed char elements will be interpreted as
    >>> # 1 signed 32-bit integer.
    >>> import array
    >>> a = array.array('b', [-1, 0, 0, 0])
    >>> torch.frombuffer(a, dtype=torch.int32)
    tensor([255], dtype=torch.int32)
""".format(
        **factory_common_args
    ),
)

add_docstr(
    torch.from_file,
    r"""
from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False)
Creates a CPU tensor with a storage backed by a memory-mapped file.

If ``shared`` is True, then memory is shared between processes. All changes are written to the file.
If ``shared`` is False, then changes to the tensor do not affect the file.

``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain
at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed.

.. note::

    Only CPU tensors can be mapped to files.

.. note::

    For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory.

Args:
    filename (str): file name to map
    shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
        underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
    size (int): number of elements in the tensor

Keyword args:
    {dtype}
    {layout}
    {device}
    {pin_memory}

Example::

    >>> t = torch.randn(2, 5, dtype=torch.float64)
    >>> t.numpy().tofile('storage.pt')
    >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64)
""".format(
        **factory_common_args
    ),
)

add_docstr(
    torch.flatten,
    r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.

Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.

.. note::

    Flattening a zero-dimensional tensor will return a one-dimensional view.

Args:
    {input}
    start_dim (int): the first dim to flatten
    end_dim (int): the last dim to flatten

Example::

    >>> t = torch.tensor([[[1, 2],
    ...                    [3, 4]],
    ...                   [[5, 6],
    ...                    [7, 8]]])
    >>> torch.flatten(t)
    tensor([1, 2, 3, 4, 5, 6, 7, 8])
    >>> torch.flatten(t, start_dim=1)
    tensor([[1, 2, 3, 4],
            [5, 6, 7, 8]])
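    >>> # Illustrative sketch of the note above: a zero-dimensional tensor
    >>> # flattens to a one-dimensional view
    >>> torch.flatten(torch.tensor(3.14))
    tensor([3.1400])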
  3650. """.format(
  3651. **common_args
  3652. ),
  3653. )

add_docstr(
    torch.unflatten,
    r"""
unflatten(input, dim, sizes) -> Tensor
Expands a dimension of the input tensor over multiple dimensions.

.. seealso::

    :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.

Args:
    {input}
    dim (int): Dimension to be unflattened, specified as an index into
        ``input.shape``.
    sizes (Tuple[int]): New shape of the unflattened dimension.
        One of its elements can be `-1` in which case the corresponding output
        dimension is inferred. Otherwise, the product of ``sizes`` *must*
        equal ``input.shape[dim]``.

Returns:
    A View of input with the specified dimension unflattened.

Examples::

    >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
    torch.Size([3, 2, 2, 1])
    >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
    torch.Size([3, 2, 2, 1])
    >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
    torch.Size([5, 2, 2, 3, 1, 1, 3])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.gather,
    r"""
gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
Gathers values along an axis specified by `dim`.

For a 3-D tensor the output is specified by::

    out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
    out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
    out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2

:attr:`input` and :attr:`index` must have the same number of dimensions.
It is also required that ``index.size(d) <= input.size(d)`` for all
dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
Note that ``input`` and ``index`` do not broadcast against each other.

Args:
    input (Tensor): the source tensor
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to gather

Keyword arguments:
    sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
    out (Tensor, optional): the destination tensor

Example::

    >>> t = torch.tensor([[1, 2], [3, 4]])
    >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
    tensor([[ 1,  1],
            [ 4,  3]])
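    >>> # Illustrative sketch: with dim=0 the indices select along rows,
    >>> # i.e. out[i][j] = input[index[i][j]][j]
    >>> torch.gather(t, 0, torch.tensor([[0, 1], [1, 0]]))
    tensor([[1, 4],
            [3, 2]])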
  3707. """,
  3708. )

add_docstr(
    torch.gcd,
    r"""
gcd(input, other, *, out=None) -> Tensor
Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.

Both :attr:`input` and :attr:`other` must have integer types.

.. note::

    This defines :math:`\gcd(0, 0) = 0`.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword arguments:
    {out}

Example::

    >>> a = torch.tensor([5, 10, 15])
    >>> b = torch.tensor([3, 4, 5])
    >>> torch.gcd(a, b)
    tensor([1, 2, 5])
    >>> c = torch.tensor([3])
    >>> torch.gcd(a, c)
    tensor([1, 1, 3])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.ge,
    r"""
ge(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \geq \text{other}` element-wise.
"""
    + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere

Example::

    >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[True, True], [False, True]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.greater_equal,
    r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
""",
)

add_docstr(
    torch.gradient,
    r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
either first or second order estimates at the boundaries.

The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.

When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.

The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:

.. math::
    \begin{aligned}
        f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
        f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
    \end{aligned}

Using the fact that :math:`f \in C^3` and solving the linear system, we derive:

.. math::
    f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
          + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }

.. note::

    We estimate the gradient of functions in complex domain
    :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.

The value of each partial derivative at the boundary points is computed differently. See edge_order below.

Args:
    input (``Tensor``): the tensor that represents the values of the function

Keyword args:
    spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
        how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
        the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
        indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
        indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
        Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
        the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
        the coordinates are (t0[1], t1[2], t2[3])

    dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
        the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
        the :attr:`spacing` argument must correspond with the specified dims.

    edge_order (``int``, optional): 1 or 2, for `first-order
        <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
        `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
        estimation of the boundary ("edge") values, respectively.

Examples::

    >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
    >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
    >>> values = torch.tensor([4., 1., 1., 16.])
    >>> torch.gradient(values, spacing = coordinates)
    (tensor([-3., -2., 2., 5.]),)

    >>> # Estimates the gradient of the R^2 -> R function whose samples are
    >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
    >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
    >>> # partial derivative for both dimensions.
    >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
    >>> torch.gradient(t)
    (tensor([[ 9., 18., 36., 72.],
             [ 9., 18., 36., 72.]]),
     tensor([[ 1.0000,  1.5000,  3.0000,  4.0000],
             [10.0000, 15.0000, 30.0000, 40.0000]]))

    >>> # A scalar value for spacing modifies the relationship between tensor indices
    >>> # and input coordinates by multiplying the indices to find the
    >>> # coordinates. For example, below the indices of the innermost
    >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
    >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
    >>> torch.gradient(t, spacing = 2.0)  # dim = None (implicitly [0, 1])
    (tensor([[ 4.5000,  9.0000, 18.0000, 36.0000],
             [ 4.5000,  9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.5000,  0.7500,  1.5000,  2.0000],
             [ 5.0000,  7.5000, 15.0000, 20.0000]]))
    >>> # doubling the spacing between samples halves the estimated partial gradients.

    >>> # Estimates only the partial derivative for dimension 1
    >>> torch.gradient(t, dim = 1)  # spacing = None (implicitly 1.)
    (tensor([[ 1.0000,  1.5000,  3.0000,  4.0000],
             [10.0000, 15.0000, 30.0000, 40.0000]]),)

    >>> # When spacing is a list of scalars, the relationship between the tensor
    >>> # indices and input coordinates changes based on dimension.
    >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
    >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
    >>> # 0, 1 translate to coordinates of [0, 2].
    >>> torch.gradient(t, spacing = [3., 2.])
    (tensor([[ 4.5000,  9.0000, 18.0000, 36.0000],
             [ 4.5000,  9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.3333,  0.5000,  1.0000,  1.3333],
             [ 3.3333,  5.0000, 10.0000, 13.3333]]))

    >>> # The following example is a replication of the previous one with explicit
    >>> # coordinates.
    >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
    >>> torch.gradient(t, spacing = coords)
    (tensor([[ 4.5000,  9.0000, 18.0000, 36.0000],
             [ 4.5000,  9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.3333,  0.5000,  1.0000,  1.3333],
             [ 3.3333,  5.0000, 10.0000, 13.3333]]))
""",
)

add_docstr(
    torch.geqrf,
    r"""
geqrf(input, *, out=None) -> (Tensor, Tensor)
This is a low-level function for calling LAPACK's geqrf directly. This function
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .

Computes a QR decomposition of :attr:`input`.
Both `Q` and `R` matrices are stored in the same output tensor `a`.
The elements of `R` are stored on and above the diagonal.
Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
are stored below the diagonal.

The results of this function can be used together with :func:`torch.linalg.householder_product`
to obtain the `Q` matrix or
with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
for an efficient matrix-matrix multiplication.

See `LAPACK documentation for geqrf`_ for further details.

.. note::

    See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
    with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.

Args:
    input (Tensor): the input matrix

Keyword args:
    out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
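
Example::

    >>> # Illustrative sketch: recover Q via householder_product and check
    >>> # that Q @ R reconstructs the input (R is the upper triangle of a)
    >>> inp = torch.randn(3, 3)
    >>> a, tau = torch.geqrf(inp)
    >>> q = torch.linalg.householder_product(a, tau)
    >>> torch.allclose(q @ torch.triu(a), inp)
    True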

.. _LAPACK documentation for geqrf:
    http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
""",
)

add_docstr(
    torch.inner,
    r"""
inner(input, other, *, out=None) -> Tensor
Computes the dot product for 1D tensors. For higher dimensions, sums the product
of elements from :attr:`input` and :attr:`other` along their last dimension.

.. note::

    If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
    to `torch.mul(input, other)`.

    If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
    dimension must match and the result is equivalent to `torch.tensordot(input,
    other, dims=([-1], [-1]))`

Args:
    input (Tensor): First input tensor
    other (Tensor): Second input tensor

Keyword args:
    out (Tensor, optional): Optional output tensor to write result into. The output
        shape is `input.shape[:-1] + other.shape[:-1]`.

Example::

    # Dot product
    >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
    tensor(7)

    # Multidimensional input tensors
    >>> a = torch.randn(2, 3)
    >>> a
    tensor([[0.8173, 1.0874, 1.1784],
            [0.3279, 0.1234, 2.7894]])
    >>> b = torch.randn(2, 4, 3)
    >>> b
    tensor([[[-0.4682, -0.7159,  0.1506],
             [ 0.4034, -0.3657,  1.0387],
             [ 0.9892, -0.6684,  0.1774],
             [ 0.9482,  1.3261,  0.3917]],

            [[ 0.4537,  0.7493,  1.1724],
             [ 0.2291,  0.5749, -0.2267],
             [-0.7920,  0.3607, -0.3701],
             [ 1.3666, -0.5850, -1.7242]]])
    >>> torch.inner(a, b)
    tensor([[[-0.9837,  1.1560,  0.2907,  2.6785],
             [ 2.5671,  0.5452, -0.6912, -1.5509]],

            [[ 0.1782,  2.9843,  0.7366,  1.5672],
             [ 3.5115, -0.4864, -1.2476, -4.4337]]])

    # Scalar input
    >>> torch.inner(a, torch.tensor(2))
    tensor([[1.6347, 2.1748, 2.3567],
            [0.6558, 0.2469, 5.5787]])
""",
)

add_docstr(
    torch.outer,
    r"""
outer(input, vec2, *, out=None) -> Tensor
Outer product of :attr:`input` and :attr:`vec2`.
If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

Args:
    input (Tensor): 1-D input vector
    vec2 (Tensor): 1-D input vector

Keyword args:
    out (Tensor, optional): optional output matrix

Example::

    >>> v1 = torch.arange(1., 5.)
    >>> v2 = torch.arange(1., 4.)
    >>> torch.outer(v1, v2)
    tensor([[  1.,   2.,   3.],
            [  2.,   4.,   6.],
            [  3.,   6.,   9.],
            [  4.,   8.,  12.]])
""",
)

add_docstr(
    torch.ger,
    r"""
ger(input, vec2, *, out=None) -> Tensor
Alias of :func:`torch.outer`.

.. warning::

    This function is deprecated and will be removed in a future PyTorch release.
    Use :func:`torch.outer` instead.
""",
)

add_docstr(
    torch.get_default_dtype,
    r"""
get_default_dtype() -> torch.dtype
Get the current default floating point :class:`torch.dtype`.

Example::

    >>> torch.get_default_dtype()  # initial default for floating point is torch.float32
    torch.float32
    >>> torch.set_default_dtype(torch.float64)
    >>> torch.get_default_dtype()  # default is now changed to torch.float64
    torch.float64
""",
)

add_docstr(
    torch.get_num_threads,
    r"""
get_num_threads() -> int
Returns the number of threads used for parallelizing CPU operations.
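
Example::

    >>> # Illustrative sketch: the value mirrors the last set_num_threads call
    >>> torch.set_num_threads(2)
    >>> torch.get_num_threads()
    2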
  3994. """,
  3995. )
  3996. add_docstr(
  3997. torch.get_num_interop_threads,
  3998. r"""
  3999. get_num_interop_threads() -> int
  4000. Returns the number of threads used for inter-op parallelism on CPU
  4001. (e.g. in JIT interpreter)
  4002. """,
  4003. )

add_docstr(
    torch.gt,
    r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
"""
    + r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere

Example::

    >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[False, True], [False, False]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.greater,
    r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
""",
)

add_docstr(
    torch.histc,
    r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
Computes the histogram of a tensor.

The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.

Elements lower than min and higher than max and ``NaN`` elements are ignored.

Args:
    {input}
    bins (int): number of histogram bins
    min (Scalar): lower end of the range (inclusive)
    max (Scalar): upper end of the range (inclusive)

Keyword args:
    {out}

Returns:
    Tensor: Histogram represented as a tensor

Example::

    >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
    tensor([ 0.,  2.,  1.,  0.])
""".format(
        **common_args
    ),
)
  4059. add_docstr(
  4060. torch.histogram,
  4061. r"""
  4062. histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
  4063. Computes a histogram of the values in a tensor.
  4064. :attr:`bins` can be an integer or a 1D tensor.
  4065. If :attr:`bins` is an int, it specifies the number of equal-width bins.
  4066. By default, the lower and upper range of the bins is determined by the
  4067. minimum and maximum elements of the input tensor. The :attr:`range`
  4068. argument can be provided to specify a range for the bins.
  4069. If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
  4070. including the rightmost edge. It should contain at least 2 elements
  4071. and its elements should be increasing.
  4072. Args:
  4073. {input}
  4074. bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
  4075. defines the sequence of bin edges including the rightmost edge.
  4076. Keyword args:
  4077. range (tuple of float): Defines the range of the bins.
  4078. weight (Tensor): If provided, weight should have the same shape as input. Each value in
  4079. input contributes its associated weight towards its bin's result.
  4080. density (bool): If False, the result will contain the count (or total weight) in each bin.
  4081. If True, the result is the value of the probability density function over the bins,
  4082. normalized such that the integral over the range of the bins is 1.
  4083. {out} (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
  4084. Returns:
  4085. hist (Tensor): 1D Tensor containing the values of the histogram.
  4086. bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
  4087. Example::
  4088. >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
  4089. (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
  4090. >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
  4091. (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
  4092. """.format(
  4093. **common_args
  4094. ),
  4095. )
  4096. add_docstr(
  4097. torch.histogramdd,
  4098. r"""
  4099. histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
  4100. Computes a multi-dimensional histogram of the values in a tensor.
  4101. Interprets the elements of an input tensor whose innermost dimension has size N
  4102. as a collection of N-dimensional points. Maps each of the points into a set of
  4103. N-dimensional bins and returns the number of points (or total weight) in each bin.
  4104. :attr:`input` must be a tensor with at least 2 dimensions.
  4105. If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
  4106. If input has three or more dimensions, all but the last dimension are flattened.
  4107. Each dimension is independently associated with its own strictly increasing sequence
  4108. of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
  4109. tensors. Alternatively, bin edges may be constructed automatically by passing a
  4110. sequence of integers specifying the number of equal-width bins in each dimension.
  4111. For each N-dimensional point in input:
  4112. - Each of its coordinates is binned independently among the bin edges
  4113. corresponding to its dimension
  4114. - Binning results are combined to identify the N-dimensional bin (if any)
  4115. into which the point falls
  4116. - If the point falls into a bin, the bin's count (or total weight) is incremented
  4117. - Points which do not fall into any bin do not contribute to the output
  4118. :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
  4119. If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
  4120. of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
  4121. least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
  4122. the left and right edges of all bins. Every bin is exclusive of its left edge. Only
  4123. the rightmost bin is inclusive of its right edge.
  4124. If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
  4125. in each dimension. By default, the leftmost and rightmost bin edges in each dimension
  4126. are determined by the minimum and maximum elements of the input tensor in the
  4127. corresponding dimension. The :attr:`range` argument can be provided to manually
  4128. specify the leftmost and rightmost bin edges in each dimension.
  4129. If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
  4130. .. note::
  4131. See also :func:`torch.histogram`, which specifically computes 1D histograms.
  4132. While :func:`torch.histogramdd` infers the dimensionality of its bins and
  4133. binned values from the shape of :attr:`input`, :func:`torch.histogram`
  4134. accepts and flattens :attr:`input` of any shape.
  4135. Args:
  4136. {input}
  4137. bins: Tensor[], int[], or int.
  4138. If Tensor[], defines the sequences of bin edges.
  4139. If int[], defines the number of equal-width bins in each dimension.
  4140. If int, defines the number of equal-width bins for all dimensions.
  4141. Keyword args:
  4142. range (sequence of float): Defines the leftmost and rightmost bin edges
  4143. in each dimension.
  4144. weight (Tensor): By default, each value in the input has weight 1. If a weight
  4145. tensor is passed, each N-dimensional coordinate in input
  4146. contributes its associated weight towards its bin's result.
  4147. The weight tensor should have the same shape as the :attr:`input`
  4148. tensor excluding its innermost dimension N.
  4149. density (bool): If False (default), the result will contain the count (or total weight)
  4150. in each bin. If True, each count (weight) is divided by the total count
  4151. (total weight), then divided by the volume of its associated bin.
  4152. Returns:
  4153. hist (Tensor): N-dimensional Tensor containing the values of the histogram.
bin_edges (Tensor[]): sequence of N 1D Tensors containing the bin edges.
  4155. Example::
  4156. >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
  4157. ... weight=torch.tensor([1., 2., 4., 8.]))
  4158. torch.return_types.histogramdd(
  4159. hist=tensor([[0., 1., 0.],
  4160. [2., 0., 0.],
  4161. [4., 0., 8.]]),
  4162. bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
  4163. tensor([0.0000, 0.6667, 1.3333, 2.0000])))
  4164. >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
  4165. ... range=[0., 1., 0., 1.], density=True)
  4166. torch.return_types.histogramdd(
  4167. hist=tensor([[2., 0.],
  4168. [0., 2.]]),
  4169. bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
  4170. tensor([0.0000, 0.5000, 1.0000])))
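>>> # Bin edges may also be specified explicitly as a sequence of 1D tensors
>>> torch.histogramdd(torch.tensor([[0.5, 0.5], [1.5, 1.5], [1.5, 0.5]]),
... bins=[torch.tensor([0., 1., 2.]), torch.tensor([0., 1., 2.])])
torch.return_types.histogramdd(
hist=tensor([[1., 0.],
[1., 1.]]),
bin_edges=(tensor([0., 1., 2.]),
tensor([0., 1., 2.])))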
  4171. """.format(
  4172. **common_args
  4173. ),
  4174. )
  4175. # TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
  4176. torch.histogramdd.__module__ = "torch"
  4177. add_docstr(
  4178. torch.hypot,
  4179. r"""
  4180. hypot(input, other, *, out=None) -> Tensor
  4181. Given the legs of a right triangle, return its hypotenuse.
  4182. .. math::
  4183. \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
  4184. The shapes of ``input`` and ``other`` must be
  4185. :ref:`broadcastable <broadcasting-semantics>`.
  4186. """
  4187. + r"""
  4188. Args:
  4189. input (Tensor): the first input tensor
  4190. other (Tensor): the second input tensor
  4191. Keyword args:
  4192. {out}
  4193. Example::
>>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
tensor([5.0000, 5.6569, 6.4031])
  4196. """.format(
  4197. **common_args
  4198. ),
  4199. )
  4200. add_docstr(
  4201. torch.i0,
  4202. r"""
  4203. i0(input, *, out=None) -> Tensor
  4204. Alias for :func:`torch.special.i0`.
  4205. """,
  4206. )
  4207. add_docstr(
  4208. torch.igamma,
  4209. r"""
  4210. igamma(input, other, *, out=None) -> Tensor
  4211. Alias for :func:`torch.special.gammainc`.
  4212. """,
  4213. )
  4214. add_docstr(
  4215. torch.igammac,
  4216. r"""
  4217. igammac(input, other, *, out=None) -> Tensor
  4218. Alias for :func:`torch.special.gammaincc`.
  4219. """,
  4220. )
  4221. add_docstr(
  4222. torch.index_select,
  4223. r"""
  4224. index_select(input, dim, index, *, out=None) -> Tensor
  4225. Returns a new tensor which indexes the :attr:`input` tensor along dimension
  4226. :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
  4227. The returned tensor has the same number of dimensions as the original tensor
  4228. (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
  4229. of :attr:`index`; other dimensions have the same size as in the original tensor.
  4230. .. note:: The returned tensor does **not** use the same storage as the original
  4231. tensor. If :attr:`out` has a different shape than expected, we
  4232. silently change it to the correct shape, reallocating the underlying
  4233. storage if necessary.
  4234. Args:
  4235. {input}
  4236. dim (int): the dimension in which we index
  4237. index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
  4238. Keyword args:
  4239. {out}
  4240. Example::
  4241. >>> x = torch.randn(3, 4)
  4242. >>> x
  4243. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  4244. [-0.4664, 0.2647, -0.1228, -1.1068],
  4245. [-1.1734, -0.6571, 0.7230, -0.6004]])
  4246. >>> indices = torch.tensor([0, 2])
  4247. >>> torch.index_select(x, 0, indices)
  4248. tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
  4249. [-1.1734, -0.6571, 0.7230, -0.6004]])
  4250. >>> torch.index_select(x, 1, indices)
  4251. tensor([[ 0.1427, -0.5414],
  4252. [-0.4664, -0.1228],
  4253. [-1.1734, 0.7230]])
  4254. """.format(
  4255. **common_args
  4256. ),
  4257. )
  4258. add_docstr(
  4259. torch.inverse,
  4260. r"""
  4261. inverse(input, *, out=None) -> Tensor
  4262. Alias for :func:`torch.linalg.inv`
  4263. """,
  4264. )
  4265. add_docstr(
  4266. torch.isin,
  4267. r"""
  4268. isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
  4269. Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
  4270. a boolean tensor of the same shape as :attr:`elements` that is True for elements
  4271. in :attr:`test_elements` and False otherwise.
  4272. .. note::
  4273. One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
  4274. Args:
  4275. elements (Tensor or Scalar): Input elements
  4276. test_elements (Tensor or Scalar): Values against which to test for each input element
  4277. assume_unique (bool, optional): If True, assumes both :attr:`elements` and
  4278. :attr:`test_elements` contain unique elements, which can speed up the
  4279. calculation. Default: False
  4280. invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
  4281. values for elements *not* in :attr:`test_elements`. Default: False
  4282. Returns:
  4283. A boolean tensor of the same shape as :attr:`elements` that is True for elements in
  4284. :attr:`test_elements` and False otherwise
Example::
  4286. >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
  4287. tensor([[False, True],
  4288. [ True, False]])
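>>> # invert=True flips the membership test
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]), invert=True)
tensor([[ True, False],
[False,  True]])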
  4289. """,
  4290. )
  4291. add_docstr(
  4292. torch.isinf,
  4293. r"""
  4294. isinf(input) -> Tensor
  4295. Tests if each element of :attr:`input` is infinite
  4296. (positive or negative infinity) or not.
  4297. .. note::
  4298. Complex values are infinite when their real or imaginary part is
  4299. infinite.
  4300. Args:
  4301. {input}
  4302. Returns:
  4303. A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
  4304. Example::
  4305. >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
  4306. tensor([False, True, False, True, False])
  4307. """.format(
  4308. **common_args
  4309. ),
  4310. )
  4311. add_docstr(
  4312. torch.isposinf,
  4313. r"""
  4314. isposinf(input, *, out=None) -> Tensor
  4315. Tests if each element of :attr:`input` is positive infinity or not.
  4316. Args:
  4317. {input}
  4318. Keyword args:
  4319. {out}
  4320. Example::
  4321. >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
  4322. >>> torch.isposinf(a)
  4323. tensor([False, True, False])
  4324. """.format(
  4325. **common_args
  4326. ),
  4327. )
  4328. add_docstr(
  4329. torch.isneginf,
  4330. r"""
  4331. isneginf(input, *, out=None) -> Tensor
  4332. Tests if each element of :attr:`input` is negative infinity or not.
  4333. Args:
  4334. {input}
  4335. Keyword args:
  4336. {out}
  4337. Example::
  4338. >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
  4339. >>> torch.isneginf(a)
  4340. tensor([ True, False, False])
  4341. """.format(
  4342. **common_args
  4343. ),
  4344. )
  4345. add_docstr(
  4346. torch.isclose,
  4347. r"""
  4348. isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
  4349. Returns a new tensor with boolean elements representing if each element of
  4350. :attr:`input` is "close" to the corresponding element of :attr:`other`.
  4351. Closeness is defined as:
  4352. .. math::
  4353. \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
  4354. """
  4355. + r"""
  4356. where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
  4357. and/or :attr:`other` are nonfinite they are close if and only if
  4358. they are equal, with NaNs being considered equal to each other when
  4359. :attr:`equal_nan` is True.
  4360. Args:
  4361. input (Tensor): first tensor to compare
  4362. other (Tensor): second tensor to compare
rtol (float, optional): relative tolerance. Default: 1e-05
atol (float, optional): absolute tolerance. Default: 1e-08
  4365. equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
  4366. Examples::
  4367. >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
  4368. tensor([ True, False, False])
  4369. >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
  4370. tensor([True, True])
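>>> # NaNs only compare as close to themselves when equal_nan=True
>>> torch.isclose(torch.tensor((float('nan'), 4.)), torch.tensor((float('nan'), 4.)), equal_nan=True)
tensor([True, True])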
  4371. """,
  4372. )
  4373. add_docstr(
  4374. torch.isfinite,
  4375. r"""
  4376. isfinite(input) -> Tensor
  4377. Returns a new tensor with boolean elements representing if each element is `finite` or not.
  4378. Real values are finite when they are not NaN, negative infinity, or infinity.
  4379. Complex values are finite when both their real and imaginary parts are finite.
  4380. Args:
  4381. {input}
  4382. Returns:
  4383. A boolean tensor that is True where :attr:`input` is finite and False elsewhere
  4384. Example::
  4385. >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
  4386. tensor([True, False, True, False, False])
  4387. """.format(
  4388. **common_args
  4389. ),
  4390. )
  4391. add_docstr(
  4392. torch.isnan,
  4393. r"""
  4394. isnan(input) -> Tensor
  4395. Returns a new tensor with boolean elements representing if each element of :attr:`input`
  4396. is NaN or not. Complex values are considered NaN when either their real
  4397. and/or imaginary part is NaN.
  4398. Arguments:
  4399. {input}
  4400. Returns:
  4401. A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
  4402. Example::
  4403. >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
  4404. tensor([False, True, False])
  4405. """.format(
  4406. **common_args
  4407. ),
  4408. )
  4409. add_docstr(
  4410. torch.isreal,
  4411. r"""
  4412. isreal(input) -> Tensor
  4413. Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
  4414. All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
  4415. Arguments:
  4416. {input}
  4417. Returns:
  4418. A boolean tensor that is True where :attr:`input` is real and False elsewhere
  4419. Example::
  4420. >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
  4421. tensor([True, False, True])
  4422. """.format(
  4423. **common_args
  4424. ),
  4425. )
  4426. add_docstr(
  4427. torch.is_floating_point,
  4428. r"""
  4429. is_floating_point(input) -> (bool)
  4430. Returns True if the data type of :attr:`input` is a floating point data type i.e.,
  4431. one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
  4432. Args:
  4433. {input}
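Example::
>>> torch.is_floating_point(torch.tensor([1.0]))
True
>>> torch.is_floating_point(torch.tensor([1]))
False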
  4434. """.format(
  4435. **common_args
  4436. ),
  4437. )
  4438. add_docstr(
  4439. torch.is_complex,
  4440. r"""
  4441. is_complex(input) -> (bool)
  4442. Returns True if the data type of :attr:`input` is a complex data type i.e.,
one of ``torch.complex64`` and ``torch.complex128``.
  4444. Args:
  4445. {input}
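Example::
>>> torch.is_complex(torch.tensor([1.0 + 2.0j]))
True
>>> torch.is_complex(torch.tensor([1.0]))
False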
  4446. """.format(
  4447. **common_args
  4448. ),
  4449. )
  4450. add_docstr(
  4451. torch.is_grad_enabled,
  4452. r"""
  4453. is_grad_enabled() -> (bool)
  4454. Returns True if grad mode is currently enabled.
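Example::
>>> torch.is_grad_enabled()  # grad mode is enabled by default
True
>>> with torch.no_grad():
...     torch.is_grad_enabled()
False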
  4455. """.format(
  4456. **common_args
  4457. ),
  4458. )
  4459. add_docstr(
  4460. torch.is_inference_mode_enabled,
  4461. r"""
  4462. is_inference_mode_enabled() -> (bool)
  4463. Returns True if inference mode is currently enabled.
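Example::
>>> torch.is_inference_mode_enabled()
False
>>> with torch.inference_mode():
...     torch.is_inference_mode_enabled()
True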
  4464. """.format(
  4465. **common_args
  4466. ),
  4467. )
  4468. add_docstr(
  4469. torch.is_inference,
  4470. r"""
  4471. is_inference(input) -> (bool)
  4472. Returns True if :attr:`input` is an inference tensor.
  4473. A non-view tensor is an inference tensor if and only if it was
  4474. allocated during inference mode. A view tensor is an inference
  4475. tensor if and only if the tensor it is a view of is an inference tensor.
  4476. For details on inference mode please see
  4477. `Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
  4478. Args:
  4479. {input}
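Example::
>>> with torch.inference_mode():
...     x = torch.ones(2, 2)
>>> torch.is_inference(x)
True
>>> torch.is_inference(torch.ones(2, 2))
False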
  4480. """.format(
  4481. **common_args
  4482. ),
  4483. )
  4484. add_docstr(
  4485. torch.is_conj,
  4486. r"""
  4487. is_conj(input) -> (bool)
  4488. Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
  4489. Args:
  4490. {input}
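Example::
>>> x = torch.tensor([1 + 1j])
>>> torch.is_conj(x)
False
>>> y = torch.conj(x)  # returns a view with the conjugate bit set
>>> torch.is_conj(y)
True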
  4491. """.format(
  4492. **common_args
  4493. ),
  4494. )
  4495. add_docstr(
  4496. torch.is_nonzero,
  4497. r"""
  4498. is_nonzero(input) -> (bool)
  4499. Returns True if the :attr:`input` is a single element tensor which is not equal to zero
  4500. after type conversions.
  4501. i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
  4502. ``torch.tensor([False])``.
Throws a ``RuntimeError`` if ``torch.numel(input) != 1`` (even in case
  4504. of sparse tensors).
  4505. Args:
  4506. {input}
  4507. Examples::
  4508. >>> torch.is_nonzero(torch.tensor([0.]))
  4509. False
  4510. >>> torch.is_nonzero(torch.tensor([1.5]))
  4511. True
  4512. >>> torch.is_nonzero(torch.tensor([False]))
  4513. False
  4514. >>> torch.is_nonzero(torch.tensor([3]))
  4515. True
  4516. >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
  4517. Traceback (most recent call last):
  4518. ...
  4519. RuntimeError: bool value of Tensor with more than one value is ambiguous
  4520. >>> torch.is_nonzero(torch.tensor([]))
  4521. Traceback (most recent call last):
  4522. ...
  4523. RuntimeError: bool value of Tensor with no values is ambiguous
  4524. """.format(
  4525. **common_args
  4526. ),
  4527. )
  4528. add_docstr(
  4529. torch.kron,
  4530. r"""
  4531. kron(input, other, *, out=None) -> Tensor
  4532. Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
  4533. If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
  4534. :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
  4535. :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
  4536. .. math::
  4537. (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
  4538. \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
  4539. where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
  4540. If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
  4541. Supports real-valued and complex-valued inputs.
  4542. .. note::
  4543. This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
  4544. as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
  4545. :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
  4546. .. math::
  4547. \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
  4548. a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
  4549. \vdots & \ddots & \vdots \\
  4550. a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
  4551. where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
  4552. Arguments:
  4553. input (Tensor)
  4554. other (Tensor)
  4555. Keyword args:
  4556. out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
  4557. Examples::
  4558. >>> mat1 = torch.eye(2)
  4559. >>> mat2 = torch.ones(2, 2)
  4560. >>> torch.kron(mat1, mat2)
  4561. tensor([[1., 1., 0., 0.],
  4562. [1., 1., 0., 0.],
  4563. [0., 0., 1., 1.],
  4564. [0., 0., 1., 1.]])
  4565. >>> mat1 = torch.eye(2)
  4566. >>> mat2 = torch.arange(1, 5).reshape(2, 2)
  4567. >>> torch.kron(mat1, mat2)
  4568. tensor([[1., 2., 0., 0.],
  4569. [3., 4., 0., 0.],
  4570. [0., 0., 1., 2.],
  4571. [0., 0., 3., 4.]])
  4572. """,
  4573. )
  4574. add_docstr(
  4575. torch.kthvalue,
  4576. r"""
  4577. kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
  4578. Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
  4579. smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each element found.
  4581. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  4582. If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
  4583. are the same size as :attr:`input`, except in the dimension :attr:`dim` where
  4584. they are of size 1. Otherwise, :attr:`dim` is squeezed
  4585. (see :func:`torch.squeeze`), resulting in both the :attr:`values` and
  4586. :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
  4587. .. note::
  4588. When :attr:`input` is a CUDA tensor and there are multiple valid
  4589. :attr:`k` th values, this function may nondeterministically return
  4590. :attr:`indices` for any of them.
  4591. Args:
  4592. {input}
  4593. k (int): k for the k-th smallest element
  4594. dim (int, optional): the dimension to find the kth value along
  4595. {keepdim}
  4596. Keyword args:
  4597. out (tuple, optional): the output tuple of (Tensor, LongTensor)
  4598. can be optionally given to be used as output buffers
  4599. Example::
  4600. >>> x = torch.arange(1., 6.)
  4601. >>> x
  4602. tensor([ 1., 2., 3., 4., 5.])
  4603. >>> torch.kthvalue(x, 4)
  4604. torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
>>> x = torch.arange(1., 7.).resize_(2, 3)
  4606. >>> x
  4607. tensor([[ 1., 2., 3.],
  4608. [ 4., 5., 6.]])
  4609. >>> torch.kthvalue(x, 2, 0, True)
  4610. torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
  4611. """.format(
  4612. **single_dim_common
  4613. ),
  4614. )
  4615. add_docstr(
  4616. torch.lcm,
  4617. r"""
  4618. lcm(input, other, *, out=None) -> Tensor
  4619. Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
  4620. Both :attr:`input` and :attr:`other` must have integer types.
  4621. .. note::
This defines :math:`\text{{lcm}}(0, 0) = 0` and :math:`\text{{lcm}}(0, a) = 0`.
  4623. Args:
  4624. {input}
  4625. other (Tensor): the second input tensor
  4626. Keyword arguments:
  4627. {out}
  4628. Example::
  4629. >>> a = torch.tensor([5, 10, 15])
  4630. >>> b = torch.tensor([3, 4, 5])
  4631. >>> torch.lcm(a, b)
  4632. tensor([15, 20, 15])
  4633. >>> c = torch.tensor([3])
  4634. >>> torch.lcm(a, c)
  4635. tensor([15, 30, 15])
  4636. """.format(
  4637. **common_args
  4638. ),
  4639. )
  4640. add_docstr(
  4641. torch.ldexp,
  4642. r"""
  4643. ldexp(input, other, *, out=None) -> Tensor
  4644. Multiplies :attr:`input` by 2 ** :attr:`other`.
  4645. .. math::
\text{out}_i = \text{input}_i * 2^{\text{other}_i}
  4647. """
  4648. + r"""
  4649. Typically this function is used to construct floating point numbers by multiplying
  4650. mantissas in :attr:`input` with integral powers of two created from the exponents
  4651. in :attr:`other`.
  4652. Args:
  4653. {input}
  4654. other (Tensor): a tensor of exponents, typically integers.
  4655. Keyword args:
  4656. {out}
  4657. Example::
  4658. >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
  4659. tensor([2.])
  4660. >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
  4661. tensor([ 2., 4., 8., 16.])
  4662. """.format(
  4663. **common_args
  4664. ),
  4665. )
  4666. add_docstr(
  4667. torch.le,
  4668. r"""
  4669. le(input, other, *, out=None) -> Tensor
  4670. Computes :math:`\text{input} \leq \text{other}` element-wise.
  4671. """
  4672. + r"""
  4673. The second argument can be a number or a tensor whose shape is
  4674. :ref:`broadcastable <broadcasting-semantics>` with the first argument.
  4675. Args:
  4676. input (Tensor): the tensor to compare
  4677. other (Tensor or Scalar): the tensor or value to compare
  4678. Keyword args:
  4679. {out}
  4680. Returns:
  4681. A boolean tensor that is True where :attr:`input` is less than or equal to
  4682. :attr:`other` and False elsewhere
  4683. Example::
  4684. >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
  4685. tensor([[True, False], [True, True]])
  4686. """.format(
  4687. **common_args
  4688. ),
  4689. )
  4690. add_docstr(
  4691. torch.less_equal,
  4692. r"""
  4693. less_equal(input, other, *, out=None) -> Tensor
  4694. Alias for :func:`torch.le`.
  4695. """,
  4696. )
  4697. add_docstr(
  4698. torch.lerp,
  4699. r"""
lerp(input, end, weight, *, out=None) -> Tensor
  4701. Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
  4702. on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
  4703. .. math::
  4704. \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
  4705. """
  4706. + r"""
  4707. The shapes of :attr:`start` and :attr:`end` must be
  4708. :ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
  4709. the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
  4710. Args:
  4711. input (Tensor): the tensor with the starting points
  4712. end (Tensor): the tensor with the ending points
  4713. weight (float or tensor): the weight for the interpolation formula
  4714. Keyword args:
  4715. {out}
  4716. Example::
  4717. >>> start = torch.arange(1., 5.)
  4718. >>> end = torch.empty(4).fill_(10)
  4719. >>> start
  4720. tensor([ 1., 2., 3., 4.])
  4721. >>> end
  4722. tensor([ 10., 10., 10., 10.])
  4723. >>> torch.lerp(start, end, 0.5)
  4724. tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
  4725. >>> torch.lerp(start, end, torch.full_like(start, 0.5))
  4726. tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
  4727. """.format(
  4728. **common_args
  4729. ),
  4730. )
  4731. add_docstr(
  4732. torch.lgamma,
  4733. r"""
  4734. lgamma(input, *, out=None) -> Tensor
  4735. Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
  4736. .. math::
  4737. \text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
  4738. """
  4739. + """
  4740. Args:
  4741. {input}
  4742. Keyword args:
  4743. {out}
  4744. Example::
  4745. >>> a = torch.arange(0.5, 2, 0.5)
  4746. >>> torch.lgamma(a)
  4747. tensor([ 0.5724, 0.0000, -0.1208])
  4748. """.format(
  4749. **common_args
  4750. ),
  4751. )
  4752. add_docstr(
  4753. torch.linspace,
  4754. r"""
  4755. linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  4756. Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
  4758. .. math::
  4759. (\text{start},
  4760. \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
  4761. \ldots,
  4762. \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
  4763. \text{end})
  4764. """
  4765. + """
From PyTorch 1.11, linspace requires the steps argument. Use steps=100 to restore the previous behavior.
  4767. Args:
  4768. start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
  4769. end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
  4770. steps (int): size of the constructed tensor
  4771. Keyword arguments:
  4772. {out}
  4773. dtype (torch.dtype, optional): the data type to perform the computation in.
  4774. Default: if None, uses the global default dtype (see torch.get_default_dtype())
  4775. when both :attr:`start` and :attr:`end` are real,
  4776. and corresponding complex dtype when either is complex.
  4777. {layout}
  4778. {device}
  4779. {requires_grad}
  4780. Example::
  4781. >>> torch.linspace(3, 10, steps=5)
  4782. tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
  4783. >>> torch.linspace(-10, 10, steps=5)
  4784. tensor([-10., -5., 0., 5., 10.])
  4785. >>> torch.linspace(start=-10, end=10, steps=5)
  4786. tensor([-10., -5., 0., 5., 10.])
  4787. >>> torch.linspace(start=-10, end=10, steps=1)
  4788. tensor([-10.])
  4789. """.format(
  4790. **factory_common_args
  4791. ),
  4792. )
  4793. add_docstr(
  4794. torch.log,
  4795. r"""
  4796. log(input, *, out=None) -> Tensor
  4797. Returns a new tensor with the natural logarithm of the elements
  4798. of :attr:`input`.
  4799. .. math::
  4800. y_{i} = \log_{e} (x_{i})
  4801. """
  4802. + r"""
  4803. Args:
  4804. {input}
  4805. Keyword args:
  4806. {out}
  4807. Example::
  4808. >>> a = torch.rand(5) * 5
  4809. >>> a
  4810. tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
  4811. >>> torch.log(a)
  4812. tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
  4813. """.format(
  4814. **common_args
  4815. ),
  4816. )
  4817. add_docstr(
  4818. torch.log10,
  4819. r"""
  4820. log10(input, *, out=None) -> Tensor
  4821. Returns a new tensor with the logarithm to the base 10 of the elements
  4822. of :attr:`input`.
  4823. .. math::
  4824. y_{i} = \log_{10} (x_{i})
  4825. """
  4826. + r"""
  4827. Args:
  4828. {input}
  4829. Keyword args:
  4830. {out}
  4831. Example::
  4832. >>> a = torch.rand(5)
  4833. >>> a
  4834. tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
  4835. >>> torch.log10(a)
  4836. tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
  4837. """.format(
  4838. **common_args
  4839. ),
  4840. )
  4841. add_docstr(
  4842. torch.log1p,
  4843. r"""
  4844. log1p(input, *, out=None) -> Tensor
  4845. Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
  4846. .. math::
  4847. y_i = \log_{e} (x_i + 1)
  4848. """
  4849. + r"""
  4850. .. note:: This function is more accurate than :func:`torch.log` for small
  4851. values of :attr:`input`
  4852. Args:
  4853. {input}
  4854. Keyword args:
  4855. {out}
  4856. Example::
  4857. >>> a = torch.randn(5)
  4858. >>> a
  4859. tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
  4860. >>> torch.log1p(a)
  4861. tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
  4862. """.format(
  4863. **common_args
  4864. ),
  4865. )
  4866. add_docstr(
  4867. torch.log2,
  4868. r"""
  4869. log2(input, *, out=None) -> Tensor
  4870. Returns a new tensor with the logarithm to the base 2 of the elements
  4871. of :attr:`input`.
  4872. .. math::
  4873. y_{i} = \log_{2} (x_{i})
  4874. """
  4875. + r"""
  4876. Args:
  4877. {input}
  4878. Keyword args:
  4879. {out}
  4880. Example::
  4881. >>> a = torch.rand(5)
  4882. >>> a
  4883. tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
  4884. >>> torch.log2(a)
  4885. tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
  4886. """.format(
  4887. **common_args
  4888. ),
  4889. )
  4890. add_docstr(
  4891. torch.logaddexp,
  4892. r"""
  4893. logaddexp(input, other, *, out=None) -> Tensor
  4894. Logarithm of the sum of exponentiations of the inputs.
  4895. Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
in statistics, where the calculated probabilities of events may be too small to be
represented by normal floating point numbers. In such cases the logarithm
  4898. of the calculated probability is stored. This function allows adding
  4899. probabilities stored in such a fashion.
This op differs from :func:`torch.logsumexp`, which performs a reduction on a single tensor.
  4902. Args:
  4903. {input}
  4904. other (Tensor): the second input tensor
  4905. Keyword arguments:
  4906. {out}
  4907. Example::
  4908. >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
  4909. tensor([-0.3069, -0.6867, -0.8731])
  4910. >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
  4911. tensor([-1., -2., -3.])
  4912. >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
  4913. tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
  4914. """.format(
  4915. **common_args
  4916. ),
  4917. )
  4918. add_docstr(
  4919. torch.logaddexp2,
  4920. r"""
  4921. logaddexp2(input, other, *, out=None) -> Tensor
  4922. Logarithm of the sum of exponentiations of the inputs in base-2.
  4923. Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
  4924. :func:`torch.logaddexp` for more details.
  4925. Args:
  4926. {input}
  4927. other (Tensor): the second input tensor
  4928. Keyword arguments:
  4929. {out}
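Example::
>>> # Mirrors the first torch.logaddexp example, computed in base 2
>>> torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
tensor([ 0.0000, -0.4150, -0.6781])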
  4930. """.format(
  4931. **common_args
  4932. ),
  4933. )
  4934. add_docstr(
  4935. torch.xlogy,
  4936. r"""
  4937. xlogy(input, other, *, out=None) -> Tensor
  4938. Alias for :func:`torch.special.xlogy`.
  4939. """,
  4940. )
  4941. add_docstr(
  4942. torch.logical_and,
  4943. r"""
  4944. logical_and(input, other, *, out=None) -> Tensor
  4945. Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
  4946. treated as ``True``.
  4947. Args:
  4948. {input}
  4949. other (Tensor): the tensor to compute AND with
  4950. Keyword args:
  4951. {out}
  4952. Example::
  4953. >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
  4954. tensor([ True, False, False])
  4955. >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
  4956. >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
  4957. >>> torch.logical_and(a, b)
  4958. tensor([False, False, True, False])
  4959. >>> torch.logical_and(a.double(), b.double())
  4960. tensor([False, False, True, False])
  4961. >>> torch.logical_and(a.double(), b)
  4962. tensor([False, False, True, False])
  4963. >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
  4964. tensor([False, False, True, False])
  4965. """.format(
  4966. **common_args
  4967. ),
  4968. )
  4969. add_docstr(
  4970. torch.logical_not,
  4971. r"""
  4972. logical_not(input, *, out=None) -> Tensor
Computes the element-wise logical NOT of the given input tensor. If :attr:`out` is not specified, the output
tensor will have the bool dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
  4975. Args:
  4976. {input}
  4977. Keyword args:
  4978. {out}
  4979. Example::
  4980. >>> torch.logical_not(torch.tensor([True, False]))
  4981. tensor([False, True])
  4982. >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
  4983. tensor([ True, False, False])
  4984. >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
  4985. tensor([ True, False, False])
  4986. >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
  4987. tensor([1, 0, 0], dtype=torch.int16)
  4988. """.format(
  4989. **common_args
  4990. ),
  4991. )
  4992. add_docstr(
  4993. torch.logical_or,
  4994. r"""
  4995. logical_or(input, other, *, out=None) -> Tensor
  4996. Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
  4997. treated as ``True``.
  4998. Args:
  4999. {input}
  5000. other (Tensor): the tensor to compute OR with
  5001. Keyword args:
  5002. {out}
  5003. Example::
  5004. >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
  5005. tensor([ True, False, True])
  5006. >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
  5007. >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
  5008. >>> torch.logical_or(a, b)
  5009. tensor([ True, True, True, False])
  5010. >>> torch.logical_or(a.double(), b.double())
  5011. tensor([ True, True, True, False])
  5012. >>> torch.logical_or(a.double(), b)
  5013. tensor([ True, True, True, False])
  5014. >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
  5015. tensor([ True, True, True, False])
  5016. """.format(
  5017. **common_args
  5018. ),
  5019. )
  5020. add_docstr(
  5021. torch.logical_xor,
  5022. r"""
  5023. logical_xor(input, other, *, out=None) -> Tensor
  5024. Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
  5025. treated as ``True``.
  5026. Args:
  5027. {input}
  5028. other (Tensor): the tensor to compute XOR with
  5029. Keyword args:
  5030. {out}
  5031. Example::
  5032. >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
  5033. tensor([False, False, True])
  5034. >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
  5035. >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
  5036. >>> torch.logical_xor(a, b)
  5037. tensor([ True, True, False, False])
  5038. >>> torch.logical_xor(a.double(), b.double())
  5039. tensor([ True, True, False, False])
  5040. >>> torch.logical_xor(a.double(), b)
  5041. tensor([ True, True, False, False])
  5042. >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
  5043. tensor([ True, True, False, False])
  5044. """.format(
  5045. **common_args
  5046. ),
  5047. )
  5048. add_docstr(
  5049. torch.logspace,
  5050. """
  5051. logspace(start, end, steps, base=10.0, *, \
  5052. out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
  5053. """
  5054. + r"""
  5055. Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
  5056. spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to
  5057. :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale
  5058. with base :attr:`base`. That is, the values are:
  5059. .. math::
  5060. (\text{base}^{\text{start}},
  5061. \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
  5062. \ldots,
  5063. \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
  5064. \text{base}^{\text{end}})
  5065. """
  5066. + """
From PyTorch 1.11, logspace requires the steps argument. Use steps=100 to restore the previous behavior.
  5068. Args:
  5069. start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
  5070. end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
  5071. steps (int): size of the constructed tensor
  5072. base (float, optional): base of the logarithm function. Default: ``10.0``.
  5073. Keyword arguments:
  5074. {out}
  5075. dtype (torch.dtype, optional): the data type to perform the computation in.
  5076. Default: if None, uses the global default dtype (see torch.get_default_dtype())
  5077. when both :attr:`start` and :attr:`end` are real,
  5078. and corresponding complex dtype when either is complex.
  5079. {layout}
  5080. {device}
  5081. {requires_grad}
  5082. Example::
  5083. >>> torch.logspace(start=-10, end=10, steps=5)
  5084. tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
  5085. >>> torch.logspace(start=0.1, end=1.0, steps=5)
  5086. tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
  5087. >>> torch.logspace(start=0.1, end=1.0, steps=1)
  5088. tensor([1.2589])
  5089. >>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.])
  5091. """.format(
  5092. **factory_common_args
  5093. ),
  5094. )
  5095. add_docstr(
  5096. torch.logsumexp,
  5097. r"""
logsumexp(input, dim, keepdim=False, *, out=None) -> Tensor
  5099. Returns the log of summed exponentials of each row of the :attr:`input`
  5100. tensor in the given dimension :attr:`dim`. The computation is numerically
  5101. stabilized.
  5102. For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
  5103. .. math::
  5104. \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
  5105. {keepdim_details}
  5106. Args:
  5107. {input}
  5108. {opt_dim}
  5109. {keepdim}
  5110. Keyword args:
  5111. {out}
  5112. Example::
  5113. >>> a = torch.randn(3, 3)
  5114. >>> torch.logsumexp(a, 1)
  5115. tensor([1.4907, 1.0593, 1.5696])
  5116. >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
  5117. tensor(1.6859e-07)
  5118. """.format(
  5119. **multi_dim_common
  5120. ),
  5121. )
  5122. add_docstr(
  5123. torch.lt,
  5124. r"""
  5125. lt(input, other, *, out=None) -> Tensor
  5126. Computes :math:`\text{input} < \text{other}` element-wise.
  5127. """
  5128. + r"""
  5129. The second argument can be a number or a tensor whose shape is
  5130. :ref:`broadcastable <broadcasting-semantics>` with the first argument.
  5131. Args:
  5132. input (Tensor): the tensor to compare
  5133. other (Tensor or float): the tensor or value to compare
  5134. Keyword args:
  5135. {out}
  5136. Returns:
  5137. A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
  5138. Example::
  5139. >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
  5140. tensor([[False, False], [True, False]])
  5141. """.format(
  5142. **common_args
  5143. ),
  5144. )
  5145. add_docstr(
  5146. torch.lu_unpack,
  5147. r"""
  5148. lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
  5149. Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
  5150. .. seealso::
  5151. :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
  5152. than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
  5153. Args:
  5154. LU_data (Tensor): the packed LU factorization data
  5155. LU_pivots (Tensor): the packed LU factorization pivots
  5156. unpack_data (bool): flag indicating if the data should be unpacked.
  5157. If ``False``, then the returned ``L`` and ``U`` are empty tensors.
  5158. Default: ``True``
  5159. unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
  5160. If ``False``, then the returned ``P`` is an empty tensor.
  5161. Default: ``True``
  5162. Keyword args:
  5163. out (tuple, optional): output tuple of three tensors. Ignored if `None`.
  5164. Returns:
  5165. A namedtuple ``(P, L, U)``
  5166. Examples::
  5167. >>> A = torch.randn(2, 3, 3)
  5168. >>> LU, pivots = torch.linalg.lu_factor(A)
  5169. >>> P, L, U = torch.lu_unpack(LU, pivots)
  5170. >>> # We can recover A from the factorization
  5171. >>> A_ = P @ L @ U
  5172. >>> torch.allclose(A, A_)
  5173. True
  5174. >>> # LU factorization of a rectangular matrix:
  5175. >>> A = torch.randn(2, 3, 2)
  5176. >>> LU, pivots = torch.linalg.lu_factor(A)
  5177. >>> P, L, U = torch.lu_unpack(LU, pivots)
  5178. >>> # P, L, U are the same as returned by linalg.lu
  5179. >>> P_, L_, U_ = torch.linalg.lu(A)
  5180. >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
  5181. True
  5182. """.format(
  5183. **common_args
  5184. ),
  5185. )
  5186. add_docstr(
  5187. torch.less,
  5188. r"""
  5189. less(input, other, *, out=None) -> Tensor
  5190. Alias for :func:`torch.lt`.
  5191. """,
  5192. )
  5193. add_docstr(
  5194. torch.lu_solve,
  5195. r"""
  5196. lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
  5197. Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
  5198. LU factorization of A from :func:`~linalg.lu_factor`.
  5199. This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
  5200. .. warning::
  5201. :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
  5202. :func:`torch.lu_solve` will be removed in a future PyTorch release.
  5203. ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
  5204. .. code:: python
  5205. X = linalg.lu_solve(LU, pivots, B)
  5206. Arguments:
  5207. b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
  5208. is zero or more batch dimensions.
  5209. LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
  5210. where :math:`*` is zero or more batch dimensions.
  5211. LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
  5212. where :math:`*` is zero or more batch dimensions.
  5213. The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
  5214. :attr:`LU_data`.
  5215. Keyword args:
  5216. {out}
  5217. Example::
  5218. >>> A = torch.randn(2, 3, 3)
  5219. >>> b = torch.randn(2, 3, 1)
  5220. >>> LU, pivots = torch.linalg.lu_factor(A)
  5221. >>> x = torch.lu_solve(b, LU, pivots)
  5222. >>> torch.dist(A @ x, b)
  5223. tensor(1.00000e-07 *
  5224. 2.8312)
  5225. """.format(
  5226. **common_args
  5227. ),
  5228. )
  5229. add_docstr(
  5230. torch.masked_select,
  5231. r"""
  5232. masked_select(input, mask, *, out=None) -> Tensor
  5233. Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
  5234. the boolean mask :attr:`mask` which is a `BoolTensor`.
  5235. The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
  5236. to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
  5237. .. note:: The returned tensor does **not** use the same storage
  5238. as the original tensor
  5239. Args:
  5240. {input}
  5241. mask (BoolTensor): the tensor containing the binary mask to index with
  5242. Keyword args:
  5243. {out}
  5244. Example::
  5245. >>> x = torch.randn(3, 4)
  5246. >>> x
  5247. tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
  5248. [-1.2035, 1.2252, 0.5002, 0.6248],
  5249. [ 0.1307, -2.0608, 0.1244, 2.0139]])
  5250. >>> mask = x.ge(0.5)
  5251. >>> mask
  5252. tensor([[False, False, False, False],
  5253. [False, True, True, True],
  5254. [False, False, False, True]])
  5255. >>> torch.masked_select(x, mask)
  5256. tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
  5257. """.format(
  5258. **common_args
  5259. ),
  5260. )
  5261. add_docstr(
  5262. torch.matrix_power,
  5263. r"""
  5264. matrix_power(input, n, *, out=None) -> Tensor
  5265. Alias for :func:`torch.linalg.matrix_power`
  5266. """,
  5267. )
  5268. add_docstr(
  5269. torch.matrix_exp,
  5270. r"""
  5271. matrix_exp(A) -> Tensor
  5272. Alias for :func:`torch.linalg.matrix_exp`.
  5273. """,
  5274. )
  5275. add_docstr(
  5276. torch.max,
  5277. r"""
  5278. max(input) -> Tensor
  5279. Returns the maximum value of all elements in the ``input`` tensor.
  5280. .. warning::
  5281. This function produces deterministic (sub)gradients unlike ``max(dim=0)``
  5282. Args:
  5283. {input}
  5284. Example::
  5285. >>> a = torch.randn(1, 3)
  5286. >>> a
  5287. tensor([[ 0.6763, 0.7445, -2.2369]])
  5288. >>> torch.max(a)
  5289. tensor(0.7445)
  5290. .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
  5291. :noindex:
  5292. Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
  5293. value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each maximum value found (argmax).
  5296. If ``keepdim`` is ``True``, the output tensors are of the same size
  5297. as ``input`` except in the dimension ``dim`` where they are of size 1.
  5298. Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
  5299. in the output tensors having 1 fewer dimension than ``input``.
  5300. .. note:: If there are multiple maximal values in a reduced row then
  5301. the indices of the first maximal value are returned.
  5302. Args:
  5303. {input}
  5304. {dim}
  5305. {keepdim} Default: ``False``.
  5306. Keyword args:
  5307. out (tuple, optional): the result tuple of two output tensors (max, max_indices)
  5308. Example::
  5309. >>> a = torch.randn(4, 4)
  5310. >>> a
  5311. tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
  5312. [ 1.1949, -1.1127, -2.2379, -0.6702],
  5313. [ 1.5717, -0.9207, 0.1297, -1.8768],
  5314. [-0.6172, 1.0036, -0.6060, -0.2432]])
  5315. >>> torch.max(a, 1)
  5316. torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
  5317. .. function:: max(input, other, *, out=None) -> Tensor
  5318. :noindex:
  5319. See :func:`torch.maximum`.
  5320. """.format(
  5321. **single_dim_common
  5322. ),
  5323. )
  5324. add_docstr(
  5325. torch.maximum,
  5326. r"""
  5327. maximum(input, other, *, out=None) -> Tensor
  5328. Computes the element-wise maximum of :attr:`input` and :attr:`other`.
  5329. .. note::
  5330. If one of the elements being compared is a NaN, then that element is returned.
  5331. :func:`maximum` is not supported for tensors with complex dtypes.
  5332. Args:
  5333. {input}
  5334. other (Tensor): the second input tensor
  5335. Keyword args:
  5336. {out}
  5337. Example::
  5338. >>> a = torch.tensor((1, 2, -1))
  5339. >>> b = torch.tensor((3, 0, 4))
  5340. >>> torch.maximum(a, b)
  5341. tensor([3, 2, 4])
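>>> # NaNs are propagated, unlike torch.fmax
>>> torch.maximum(torch.tensor([1., float('nan')]), torch.tensor([3., 0.]))
tensor([3., nan])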
  5342. """.format(
  5343. **common_args
  5344. ),
  5345. )
  5346. add_docstr(
  5347. torch.fmax,
  5348. r"""
  5349. fmax(input, other, *, out=None) -> Tensor
  5350. Computes the element-wise maximum of :attr:`input` and :attr:`other`.
  5351. This is like :func:`torch.maximum` except it handles NaNs differently:
  5352. if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
  5353. Only if both elements are NaN is NaN propagated.
  5354. This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
  5355. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
  5356. :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
  5357. Args:
  5358. {input}
  5359. other (Tensor): the second input tensor
  5360. Keyword args:
  5361. {out}
  5362. Example::
  5363. >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
  5364. >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
  5365. >>> torch.fmax(a, b)
  5366. tensor([9.7000, 0.5000, 3.1000, nan])
  5367. """.format(
  5368. **common_args
  5369. ),
  5370. )
  5371. add_docstr(
  5372. torch.amax,
  5373. r"""
  5374. amax(input, dim, keepdim=False, *, out=None) -> Tensor
  5375. Returns the maximum value of each slice of the :attr:`input` tensor in the given
  5376. dimension(s) :attr:`dim`.
  5377. .. note::
  5378. The difference between ``max``/``min`` and ``amax``/``amin`` is:
  5379. - ``amax``/``amin`` supports reducing on multiple dimensions,
  5380. - ``amax``/``amin`` does not return indices,
  5381. - ``amax``/``amin`` evenly distributes gradient between equal values,
  5382. while ``max(dim)``/``min(dim)`` propagates gradient only to a single
  5383. index in the source tensor.
  5384. {keepdim_details}
  5385. Args:
  5386. {input}
  5387. {dim}
  5388. {keepdim}
  5389. Keyword args:
  5390. {out}
  5391. Example::
  5392. >>> a = torch.randn(4, 4)
  5393. >>> a
  5394. tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
  5395. [-0.7158, 1.1775, 2.0992, 0.4817],
  5396. [-0.0053, 0.0164, -1.3738, -0.0507],
  5397. [ 1.9700, 1.1106, -1.0318, -1.0816]])
  5398. >>> torch.amax(a, 1)
  5399. tensor([1.4878, 2.0992, 0.0164, 1.9700])
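>>> # amax can reduce over several dimensions at once
>>> torch.amax(a, dim=(0, 1))
tensor(2.0992)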
  5400. """.format(
  5401. **multi_dim_common
  5402. ),
  5403. )
  5404. add_docstr(
  5405. torch.argmax,
  5406. r"""
  5407. argmax(input) -> LongTensor
  5408. Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
  5409. This is the second value returned by :meth:`torch.max`. See its
  5410. documentation for the exact semantics of this method.
  5411. .. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
  5412. Args:
  5413. {input}
  5414. Example::
  5415. >>> a = torch.randn(4, 4)
  5416. >>> a
  5417. tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
  5418. [-0.7401, -0.8805, -0.3402, -1.1936],
  5419. [ 0.4907, -1.3948, -1.0691, -0.3132],
  5420. [-1.6092, 0.5419, -0.2993, 0.3195]])
  5421. >>> torch.argmax(a)
  5422. tensor(0)
  5423. .. function:: argmax(input, dim, keepdim=False) -> LongTensor
  5424. :noindex:
  5425. Returns the indices of the maximum values of a tensor across a dimension.
  5426. This is the second value returned by :meth:`torch.max`. See its
  5427. documentation for the exact semantics of this method.
  5428. Args:
  5429. {input}
  5430. {dim} If ``None``, the argmax of the flattened input is returned.
  5431. {keepdim}
  5432. Example::
  5433. >>> a = torch.randn(4, 4)
  5434. >>> a
  5435. tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
  5436. [-0.7401, -0.8805, -0.3402, -1.1936],
  5437. [ 0.4907, -1.3948, -1.0691, -0.3132],
  5438. [-1.6092, 0.5419, -0.2993, 0.3195]])
  5439. >>> torch.argmax(a, dim=1)
  5440. tensor([ 0, 2, 0, 1])
  5441. """.format(
  5442. **single_dim_common
  5443. ),
  5444. )
  5445. add_docstr(
  5446. torch.argwhere,
  5447. r"""
  5448. argwhere(input) -> Tensor
  5449. Returns a tensor containing the indices of all non-zero elements of
  5450. :attr:`input`. Each row in the result contains the indices of a non-zero
  5451. element in :attr:`input`. The result is sorted lexicographically, with
  5452. the last index changing the fastest (C-style).
  5453. If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
  5454. :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
  5455. non-zero elements in the :attr:`input` tensor.
  5456. .. note::
  5457. This function is similar to NumPy's `argwhere`.
  5458. When :attr:`input` is on CUDA, this function causes host-device synchronization.
  5459. Args:
  5460. {input}
  5461. Example::
  5462. >>> t = torch.tensor([1, 0, 1])
  5463. >>> torch.argwhere(t)
  5464. tensor([[0],
  5465. [2]])
  5466. >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
  5467. >>> torch.argwhere(t)
  5468. tensor([[0, 0],
  5469. [0, 2],
  5470. [1, 1],
  5471. [1, 2]])
  5472. """,
  5473. )
  5474. add_docstr(
  5475. torch.mean,
  5476. r"""
  5477. mean(input, *, dtype=None) -> Tensor
  5478. Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
  5479. Args:
  5480. input (Tensor):
  5481. the input tensor, either of floating point or complex dtype
  5482. Keyword args:
  5483. {dtype}
  5484. Example::
  5485. >>> a = torch.randn(1, 3)
  5486. >>> a
  5487. tensor([[ 0.2294, -0.5481, 1.3288]])
  5488. >>> torch.mean(a)
  5489. tensor(0.3367)
  5490. .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
  5491. :noindex:
  5492. Returns the mean value of each row of the :attr:`input` tensor in the given
  5493. dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
  5494. reduce over all of them.
  5495. {keepdim_details}
  5496. Args:
  5497. {input}
  5498. {dim}
  5499. {keepdim}
  5500. Keyword args:
  5501. {dtype}
  5502. {out}
  5503. .. seealso::
  5504. :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
  5505. Example::
  5506. >>> a = torch.randn(4, 4)
  5507. >>> a
  5508. tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
  5509. [-0.9644, 1.0131, -0.6549, -1.4279],
  5510. [-0.2951, -1.3350, -0.7694, 0.5600],
  5511. [ 1.0842, -0.9580, 0.3623, 0.2343]])
  5512. >>> torch.mean(a, 1)
  5513. tensor([-0.0163, -0.5085, -0.4599, 0.1807])
  5514. >>> torch.mean(a, 1, True)
  5515. tensor([[-0.0163],
  5516. [-0.5085],
  5517. [-0.4599],
  5518. [ 0.1807]])
  5519. """.format(
  5520. **multi_dim_common
  5521. ),
  5522. )
  5523. add_docstr(
  5524. torch.nanmean,
  5525. r"""
  5526. nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
  5527. Computes the mean of all `non-NaN` elements along the specified dimensions.
  5528. This function is identical to :func:`torch.mean` when there are no `NaN` values
  5529. in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
  5530. propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
  5531. `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
  5532. {keepdim_details}
  5533. Args:
  5534. {input}
  5535. {opt_dim}
  5536. {keepdim}
  5537. Keyword args:
  5538. {dtype}
  5539. {out}
  5540. .. seealso::
  5541. :func:`torch.mean` computes the mean value, propagating `NaN`.
  5542. Example::
  5543. >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
  5544. >>> x.mean()
  5545. tensor(nan)
  5546. >>> x.nanmean()
  5547. tensor(1.8000)
  5548. >>> x.mean(dim=0)
  5549. tensor([ nan, 1.5000, 2.5000])
  5550. >>> x.nanmean(dim=0)
  5551. tensor([1.0000, 1.5000, 2.5000])
>>> # If all elements in the reduced dimensions are NaN then the result is NaN
  5553. >>> torch.tensor([torch.nan]).nanmean()
  5554. tensor(nan)
  5555. """.format(
  5556. **multi_dim_common
  5557. ),
  5558. )
add_docstr(
    torch.median,
    r"""
median(input) -> Tensor

Returns the median of the values in :attr:`input`.

.. note::
    The median is not unique for :attr:`input` tensors with an even number
    of elements. In this case the lower of the two medians is returned. To
    compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.

.. warning::
    This function produces deterministic (sub)gradients unlike ``median(dim=0)``.

Args:
    {input}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 1.5219, -1.5212,  0.2202]])
    >>> torch.median(a)
    tensor(0.2202)

.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
   :noindex:

Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.

By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.

.. note::
    The median is not unique for :attr:`input` tensors with an even number
    of elements in the dimension :attr:`dim`. In this case the lower of the
    two medians is returned. To compute the mean of both medians in
    :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.

.. warning::
    ``indices`` does not necessarily contain the first occurrence of each
    median value found, unless it is unique.
    The exact implementation details are device-specific.
    Do not expect the same result when run on CPU and GPU in general.
    For the same reason do not expect the gradients to be deterministic.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
        tensor, which must have dtype long, with their indices in the dimension
        :attr:`dim` of :attr:`input`.

Example::

    >>> a = torch.randn(4, 5)
    >>> a
    tensor([[ 0.2505, -0.3982, -0.9948,  0.3518, -1.3131],
            [ 0.3180, -0.6993,  1.0436,  0.0438,  0.2270],
            [-0.2751,  0.7303,  0.2192,  0.3321,  0.2488],
            [ 1.0778, -1.9510,  0.7048,  0.4742, -0.7125]])
    >>> torch.median(a, 1)
    torch.return_types.median(values=tensor([-0.3982,  0.2270,  0.2488,  0.4742]), indices=tensor([1, 4, 4, 3]))
""".format(
        **single_dim_common
    ),
)

add_docstr(
    torch.nanmedian,
    r"""
nanmedian(input) -> Tensor

Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.

This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
while this function will return the median of the non-``NaN`` elements in :attr:`input`.
If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.

Args:
    {input}

Example::

    >>> a = torch.tensor([1, float('nan'), 3, 2])
    >>> a.median()
    tensor(nan)
    >>> a.nanmedian()
    tensor(2.)

.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
   :noindex:

Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
found in the dimension :attr:`dim`.

This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
        tensor, which must have dtype long, with their indices in the dimension
        :attr:`dim` of :attr:`input`.

Example::

    >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
    >>> a
    tensor([[2., 3., 1.],
            [nan, 1., nan]])
    >>> a.median(0)
    torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
    >>> a.nanmedian(0)
    torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
""".format(
        **single_dim_common
    ),
)

add_docstr(
    torch.quantile,
    r"""
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.

To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
:attr:`interpolation` method as follows:

- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.

If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, and the remaining dimensions are what remains from the reduction.

.. note::
    By default :attr:`dim` is ``None``, resulting in the :attr:`input` tensor being flattened before computation.

Args:
    {input}
    q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
    {dim}
    {keepdim}

Keyword arguments:
    interpolation (str): interpolation method to use when the desired quantile lies between two data points.
        Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
        Default is ``linear``.
    {out}

Example::

    >>> a = torch.randn(2, 3)
    >>> a
    tensor([[ 0.0795, -1.2117,  0.9765],
            [ 1.1707,  0.6706,  0.4884]])
    >>> q = torch.tensor([0.25, 0.5, 0.75])
    >>> torch.quantile(a, q, dim=1, keepdim=True)
    tensor([[[-0.5661],
             [ 0.5795]],

            [[ 0.0795],
             [ 0.6706]],

            [[ 0.5280],
             [ 0.9206]]])
    >>> torch.quantile(a, q, dim=1, keepdim=True).shape
    torch.Size([3, 2, 1])
    >>> a = torch.arange(4.)
    >>> a
    tensor([0., 1., 2., 3.])
    >>> torch.quantile(a, 0.6, interpolation='linear')
    tensor(1.8000)
    >>> torch.quantile(a, 0.6, interpolation='lower')
    tensor(1.)
    >>> torch.quantile(a, 0.6, interpolation='higher')
    tensor(2.)
    >>> torch.quantile(a, 0.6, interpolation='midpoint')
    tensor(1.5000)
    >>> torch.quantile(a, 0.6, interpolation='nearest')
    tensor(2.)
    >>> torch.quantile(a, 0.4, interpolation='nearest')
    tensor(1.)
""".format(
        **single_dim_common
    ),
)
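
# A hand-worked sketch of the ``linear`` interpolation rule above; the tensor
# and q value are assumptions picked so the arithmetic is easy to follow.
#
#     >>> a = torch.arange(4.)              # sorted values [0., 1., 2., 3.]
#     >>> idx = 0.6 * (a.numel() - 1)       # 1.8: lies between indices 1 and 2
#     >>> a[1] + (a[2] - a[1]) * (idx - 1)  # a + (b - a) * fraction
#     tensor(1.8000)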
add_docstr(
    torch.nanquantile,
    r"""
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor

This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
not exist. If all values in a reduced row are ``NaN`` then the quantiles for
that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.

Args:
    {input}
    q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
    {dim}
    {keepdim}

Keyword arguments:
    interpolation (str): interpolation method to use when the desired quantile lies between two data points.
        Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
        Default is ``linear``.
    {out}

Example::

    >>> t = torch.tensor([float('nan'), 1, 2])
    >>> t.quantile(0.5)
    tensor(nan)
    >>> t.nanquantile(0.5)
    tensor(1.5000)
    >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
    >>> t
    tensor([[nan, nan],
            [1., 2.]])
    >>> t.nanquantile(0.5, dim=0)
    tensor([1., 2.])
    >>> t.nanquantile(0.5, dim=1)
    tensor([   nan, 1.5000])
""".format(
        **single_dim_common
    ),
)

add_docstr(
    torch.min,
    r"""
min(input) -> Tensor

Returns the minimum value of all elements in the :attr:`input` tensor.

.. warning::
    This function produces deterministic (sub)gradients unlike ``min(dim=0)``.

Args:
    {input}

Example::

    >>> a = torch.randn(1, 3)
    >>> a
    tensor([[ 0.6750,  1.0857,  1.7197]])
    >>> torch.min(a)
    tensor(0.6750)

.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
   :noindex:

Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
(argmin).

If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.

.. note:: If there are multiple minimal values in a reduced row then
          the indices of the first minimal value are returned.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out (tuple, optional): the tuple of two output tensors (min, min_indices)

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
            [-1.4644, -0.2635, -0.3651,  0.6134],
            [ 0.2457,  0.0384,  1.0128,  0.7015],
            [-0.1153,  2.9849,  2.1458,  0.5788]])
    >>> torch.min(a, 1)
    torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))

.. function:: min(input, other, *, out=None) -> Tensor
   :noindex:

See :func:`torch.minimum`.
""".format(
        **single_dim_common
    ),
)

add_docstr(
    torch.minimum,
    r"""
minimum(input, other, *, out=None) -> Tensor

Computes the element-wise minimum of :attr:`input` and :attr:`other`.

.. note::
    If one of the elements being compared is a NaN, then that element is returned.
    :func:`minimum` is not supported for tensors with complex dtypes.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor((1, 2, -1))
    >>> b = torch.tensor((3, 0, 4))
    >>> torch.minimum(a, b)
    tensor([1, 0, -1])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.fmin,
    r"""
fmin(input, other, *, out=None) -> Tensor

Computes the element-wise minimum of :attr:`input` and :attr:`other`.

This is like :func:`torch.minimum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
Only if both elements are NaN is NaN propagated.

This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.

Args:
    {input}
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
    >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
    >>> torch.fmin(a, b)
    tensor([-9.3000, 0.1000, 2.1000, nan])
""".format(
        **common_args
    ),
)
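
# Side-by-side sketch of the two NaN policies described above; the input
# values are assumptions for illustration.
#
#     >>> a = torch.tensor([1.0, float('nan')])
#     >>> b = torch.tensor([2.0, 0.5])
#     >>> torch.minimum(a, b)  # propagates the NaN
#     tensor([1., nan])
#     >>> torch.fmin(a, b)     # takes the non-NaN operand instead
#     tensor([1.0000, 0.5000])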
add_docstr(
    torch.amin,
    r"""
amin(input, dim, keepdim=False, *, out=None) -> Tensor

Returns the minimum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.

.. note::
    The difference between ``max``/``min`` and ``amax``/``amin`` is:
        - ``amax``/``amin`` supports reducing on multiple dimensions,
        - ``amax``/``amin`` does not return indices,
        - ``amax``/``amin`` evenly distributes gradient between equal values,
          while ``max(dim)``/``min(dim)`` propagates gradient only to a single
          index in the source tensor.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.6451, -0.4866,  0.2987, -1.3312],
            [-0.5744,  1.2980,  1.8397, -0.2713],
            [ 0.9128,  0.9214, -1.7268, -0.2995],
            [ 0.9023,  0.4853,  0.9075, -1.6165]])
    >>> torch.amin(a, 1)
    tensor([-1.3312, -0.5744, -1.7268, -1.6165])
""".format(
        **multi_dim_common
    ),
)

add_docstr(
    torch.aminmax,
    r"""
aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)

Computes the minimum and maximum values of the :attr:`input` tensor.

Args:
    input (Tensor):
        The input tensor

Keyword Args:
    dim (Optional[int]):
        The dimension along which to compute the values. If `None`,
        computes the values over the entire :attr:`input` tensor.
        Default is `None`.
    keepdim (bool):
        If `True`, the reduced dimensions will be kept in the output
        tensor as dimensions with size 1 for broadcasting, otherwise
        they will be removed, as if calling (:func:`torch.squeeze`).
        Default is `False`.
    out (Optional[Tuple[Tensor, Tensor]]):
        Optional tensors on which to write the result. Must have the same
        shape and dtype as the expected output.
        Default is `None`.

Returns:
    A named tuple `(min, max)` containing the minimum and maximum values.

Raises:
    RuntimeError
        If any of the dimensions to compute the values over has size 0.

.. note::
    NaN values are propagated to the output if at least one value is NaN.

.. seealso::
    :func:`torch.amin` computes just the minimum value
    :func:`torch.amax` computes just the maximum value

Example::

    >>> torch.aminmax(torch.tensor([1, -3, 5]))
    torch.return_types.aminmax(
    min=tensor(-3),
    max=tensor(5))

    >>> # aminmax propagates NaNs
    >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
    torch.return_types.aminmax(
    min=tensor(nan),
    max=tensor(nan))

    >>> t = torch.arange(10).view(2, 5)
    >>> t
    tensor([[0, 1, 2, 3, 4],
            [5, 6, 7, 8, 9]])
    >>> t.aminmax(dim=0, keepdim=True)
    torch.return_types.aminmax(
    min=tensor([[0, 1, 2, 3, 4]]),
    max=tensor([[5, 6, 7, 8, 9]]))
""",
)

add_docstr(
    torch.argmin,
    r"""
argmin(input, dim=None, keepdim=False) -> LongTensor

Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.

This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.

.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.

Args:
    {input}
    {dim} If ``None``, the argmin of the flattened input is returned.
    {keepdim}

Example::

    >>> a = torch.randn(4, 4)
    >>> a
    tensor([[ 0.1139,  0.2254, -0.1381,  0.3687],
            [ 1.0100, -1.1975, -0.0102, -0.4732],
            [-0.9240,  0.1207, -0.7506, -1.0213],
            [ 1.7809, -1.2960,  0.9384,  0.1438]])
    >>> torch.argmin(a)
    tensor(13)
    >>> torch.argmin(a, dim=1)
    tensor([ 2,  1,  3,  1])
    >>> torch.argmin(a, dim=1, keepdim=True)
    tensor([[2],
            [1],
            [3],
            [1]])
""".format(
        **single_dim_common
    ),
)

add_docstr(
    torch.mm,
    r"""
mm(input, mat2, *, out=None) -> Tensor

Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.

If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
          For broadcasting matrix products, see :func:`torch.matmul`.

Supports strided and sparse 2-D tensors as inputs, and autograd with
respect to strided inputs.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
If :attr:`out` is provided its layout will be used. Otherwise, the result
layout will be deduced from that of :attr:`input`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

Args:
    input (Tensor): the first matrix to be matrix multiplied
    mat2 (Tensor): the second matrix to be matrix multiplied

Keyword args:
    {out}

Example::

    >>> mat1 = torch.randn(2, 3)
    >>> mat2 = torch.randn(3, 3)
    >>> torch.mm(mat1, mat2)
    tensor([[ 0.4851,  0.5037, -0.3633],
            [-0.0760, -3.6705,  2.4784]])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)

add_docstr(
    torch.hspmm,
    r"""
hspmm(mat1, mat2, *, out=None) -> Tensor

Performs a matrix multiplication of a :ref:`sparse COO matrix
<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
<sparse-hybrid-coo-docs>`.

Args:
    mat1 (Tensor): the first sparse matrix to be matrix multiplied
    mat2 (Tensor): the second strided matrix to be matrix multiplied

Keyword args:
    {out}
""".format(
        **common_args
    ),
)
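
# Minimal usage sketch, since the hspmm docstring above carries no example;
# the matrices here are assumptions chosen for illustration.
#
#     >>> mat1 = torch.tensor([[1., 0.], [0., 2.]]).to_sparse()
#     >>> mat2 = torch.ones(2, 3)
#     >>> torch.hspmm(mat1, mat2).to_dense()
#     tensor([[1., 1., 1.],
#             [2., 2., 2.]])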
add_docstr(
    torch.matmul,
    r"""
matmul(input, other, *, out=None) -> Tensor

Matrix product of two tensors.

The behavior depends on the dimensionality of the tensors as follows:

- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is 2-dimensional,
  a 1 is prepended to its dimension for the purpose of the matrix multiply.
  After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is 1-dimensional,
  the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
  N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
  argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
  batched matrix multiply and removed after. If the second argument is 1-dimensional, a
  1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
  The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
  must be broadcastable). For example, if :attr:`input` is a
  :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
  tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.

  Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
  are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
  :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
  tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
  matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular the
matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions
as :func:`torch.mm`.

{sparse_beta_warning}

{tf32_note}

{rocm_fp16_note}

.. note::
    The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.

Arguments:
    input (Tensor): the first tensor to be multiplied
    other (Tensor): the second tensor to be multiplied

Keyword args:
    {out}

Example::

    >>> # vector x vector
    >>> tensor1 = torch.randn(3)
    >>> tensor2 = torch.randn(3)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([])
    >>> # matrix x vector
    >>> tensor1 = torch.randn(3, 4)
    >>> tensor2 = torch.randn(4)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([3])
    >>> # batched matrix x broadcasted vector
    >>> tensor1 = torch.randn(10, 3, 4)
    >>> tensor2 = torch.randn(4)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 3])
    >>> # batched matrix x batched matrix
    >>> tensor1 = torch.randn(10, 3, 4)
    >>> tensor2 = torch.randn(10, 4, 5)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 3, 5])
    >>> # batched matrix x broadcasted matrix
    >>> tensor1 = torch.randn(10, 3, 4)
    >>> tensor2 = torch.randn(4, 5)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 3, 5])
""".format(
        **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
    ),
)
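
# Sketch of the batch-dimension broadcasting rule described above; the shapes
# are assumptions chosen to mirror the (j x 1 x n x m) / (k x m x p) example.
#
#     >>> a = torch.randn(2, 1, 3, 4)  # batch dims (2, 1), matrix dims (3, 4)
#     >>> b = torch.randn(5, 4, 6)     # batch dim  (5,),   matrix dims (4, 6)
#     >>> torch.matmul(a, b).shape     # batch dims broadcast to (2, 5)
#     torch.Size([2, 5, 3, 6])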
add_docstr(
    torch.mode,
    r"""
mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.

By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.

If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.

.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    out (tuple, optional): the result tuple of two output tensors (values, indices)

Example::

    >>> b = torch.tensor([[0, 0, 0, 2, 0, 0, 2],
    ...                   [0, 3, 0, 0, 2, 0, 1],
    ...                   [2, 2, 2, 0, 0, 0, 3],
    ...                   [2, 2, 3, 0, 1, 1, 0],
    ...                   [1, 1, 0, 0, 2, 0, 2]])
    >>> torch.mode(b, 0)
    torch.return_types.mode(
    values=tensor([0, 2, 0, 0, 0, 0, 2]),
    indices=tensor([1, 3, 4, 4, 2, 4, 4]))
""".format(
        **single_dim_common
    ),
)

add_docstr(
    torch.mul,
    r"""
mul(input, other, *, out=None) -> Tensor

Multiplies :attr:`input` by :attr:`other`.

.. math::
    \text{out}_i = \text{input}_i \times \text{other}_i
"""
    + r"""

Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.

Args:
    {input}
    other (Tensor or Number): the tensor or number to multiply input by.

Keyword args:
    {out}

Examples::

    >>> a = torch.randn(3)
    >>> a
    tensor([ 0.2015, -0.4255,  2.6087])
    >>> torch.mul(a, 100)
    tensor([  20.1494,  -42.5491,  260.8663])

    >>> b = torch.randn(4, 1)
    >>> b
    tensor([[ 1.1207],
            [-0.3137],
            [ 0.0700],
            [ 0.8378]])
    >>> c = torch.randn(1, 4)
    >>> c
    tensor([[ 0.5146,  0.1216, -0.5244,  2.2382]])
    >>> torch.mul(b, c)
    tensor([[ 0.5767,  0.1363, -0.5877,  2.5083],
            [-0.1614, -0.0382,  0.1645, -0.7021],
            [ 0.0360,  0.0085, -0.0367,  0.1567],
            [ 0.4312,  0.1019, -0.4394,  1.8753]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.multiply,
    r"""
multiply(input, other, *, out=None)

Alias for :func:`torch.mul`.
""",
)

add_docstr(
    torch.multinomial,
    r"""
multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor

Returns a tensor where each row contains :attr:`num_samples` indices sampled
from the multinomial (a stricter definition would be multivariate,
refer to torch.distributions.multinomial.Multinomial for more details)
probability distribution located in the corresponding row
of tensor :attr:`input`.

.. note::
    The rows of :attr:`input` do not need to sum to one (in which case we use
    the values as weights), but must be non-negative, finite and have
    a non-zero sum.

Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).

If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.

If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
:math:`(m \times \text{{num\_samples}})`.

If replacement is ``True``, samples are drawn with replacement.

If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.

.. note::
    When drawn without replacement, :attr:`num_samples` must be lower than
    the number of non-zero elements in :attr:`input` (or the min number of non-zero
    elements in each row of :attr:`input` if it is a matrix).

Args:
    input (Tensor): the input tensor containing probabilities
    num_samples (int): number of samples to draw
    replacement (bool, optional): whether to draw with replacement or not

Keyword args:
    {generator}
    {out}

Example::

    >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)  # create a tensor of weights
    >>> torch.multinomial(weights, 2)
    tensor([1, 2])
    >>> torch.multinomial(weights, 5)  # ERROR!
    RuntimeError: cannot sample n_sample > prob_dist.size(-1) samples without replacement
    >>> torch.multinomial(weights, 4, replacement=True)
    tensor([ 2,  1,  1,  1])
""".format(
        **common_args
    ),
)
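
# Empirical sketch of the note above that rows act as (unnormalized) weights;
# the seed, weights, and sample count are assumptions for the demonstration.
#
#     >>> g = torch.Generator().manual_seed(0)
#     >>> weights = torch.tensor([1., 3.])  # need not sum to one
#     >>> s = torch.multinomial(weights, 10000, replacement=True, generator=g)
#     >>> (s == 1).float().mean()  # close to 3 / (1 + 3) = 0.75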
add_docstr(
    torch.mv,
    r"""
mv(input, vec, *, out=None) -> Tensor

Performs a matrix-vector product of the matrix :attr:`input` and the vector
:attr:`vec`.

If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.

Args:
    input (Tensor): matrix to be multiplied
    vec (Tensor): vector to be multiplied

Keyword args:
    {out}

Example::

    >>> mat = torch.randn(2, 3)
    >>> vec = torch.randn(3)
    >>> torch.mv(mat, vec)
    tensor([ 1.0404, -0.6361])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.mvlgamma,
    r"""
mvlgamma(input, p, *, out=None) -> Tensor

Alias for :func:`torch.special.multigammaln`.
""",
)

add_docstr(
    torch.movedim,
    r"""
movedim(input, source, destination) -> Tensor

Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
to the position(s) in :attr:`destination`.

Other dimensions of :attr:`input` that are not explicitly moved remain in
their original order and appear at the positions not specified in :attr:`destination`.

Args:
    {input}
    source (int or tuple of ints): Original positions of the dims to move. These must be unique.
    destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.

Examples::

    >>> t = torch.randn(3,2,1)
    >>> t
    tensor([[[-0.3362],
             [-0.8437]],

            [[-0.9627],
             [ 0.1727]],

            [[ 0.5173],
             [-0.1398]]])
    >>> torch.movedim(t, 1, 0).shape
    torch.Size([2, 3, 1])
    >>> torch.movedim(t, 1, 0)
    tensor([[[-0.3362],
             [-0.9627],
             [ 0.5173]],

            [[-0.8437],
             [ 0.1727],
             [-0.1398]]])
    >>> torch.movedim(t, (1, 2), (0, 1)).shape
    torch.Size([2, 1, 3])
    >>> torch.movedim(t, (1, 2), (0, 1))
    tensor([[[-0.3362, -0.9627,  0.5173]],

            [[-0.8437,  0.1727, -0.1398]]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.moveaxis,
    r"""
moveaxis(input, source, destination) -> Tensor

Alias for :func:`torch.movedim`.

This function is equivalent to NumPy's moveaxis function.

Examples::

    >>> t = torch.randn(3,2,1)
    >>> t
    tensor([[[-0.3362],
             [-0.8437]],

            [[-0.9627],
             [ 0.1727]],

            [[ 0.5173],
             [-0.1398]]])
    >>> torch.moveaxis(t, 1, 0).shape
    torch.Size([2, 3, 1])
    >>> torch.moveaxis(t, 1, 0)
    tensor([[[-0.3362],
             [-0.9627],
             [ 0.5173]],

            [[-0.8437],
             [ 0.1727],
             [-0.1398]]])
    >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
    torch.Size([2, 1, 3])
    >>> torch.moveaxis(t, (1, 2), (0, 1))
    tensor([[[-0.3362, -0.9627,  0.5173]],

            [[-0.8437,  0.1727, -0.1398]]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.swapdims,
    r"""
swapdims(input, dim0, dim1) -> Tensor

Alias for :func:`torch.transpose`.

This function is equivalent to NumPy's swapaxes function.

Examples::

    >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
    >>> x
    tensor([[[0, 1],
             [2, 3]],

            [[4, 5],
             [6, 7]]])
    >>> torch.swapdims(x, 0, 1)
    tensor([[[0, 1],
             [4, 5]],

            [[2, 3],
             [6, 7]]])
    >>> torch.swapdims(x, 0, 2)
    tensor([[[0, 4],
             [2, 6]],

            [[1, 5],
             [3, 7]]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.swapaxes,
    r"""
swapaxes(input, axis0, axis1) -> Tensor

Alias for :func:`torch.transpose`.

This function is equivalent to NumPy's swapaxes function.

Examples::

    >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
    >>> x
    tensor([[[0, 1],
             [2, 3]],

            [[4, 5],
             [6, 7]]])
    >>> torch.swapaxes(x, 0, 1)
    tensor([[[0, 1],
             [4, 5]],

            [[2, 3],
             [6, 7]]])
    >>> torch.swapaxes(x, 0, 2)
    tensor([[[0, 4],
             [2, 6]],

            [[1, 5],
             [3, 7]]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.narrow,
    r"""
narrow(input, dim, start, length) -> Tensor

Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
dimension :attr:`dim` ranges from :attr:`start` to ``start + length``. The
returned tensor and :attr:`input` tensor share the same underlying storage.

Args:
    input (Tensor): the tensor to narrow
    dim (int): the dimension along which to narrow
    start (int or Tensor): index of the element to start the narrowed dimension
        from. Can be negative, which means indexing from the end of `dim`. If
        `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
    length (int): length of the narrowed dimension, must be weakly positive

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> torch.narrow(x, 0, 0, 2)
    tensor([[ 1,  2,  3],
            [ 4,  5,  6]])
    >>> torch.narrow(x, 1, 1, 2)
    tensor([[ 2,  3],
            [ 5,  6],
            [ 8,  9]])
    >>> torch.narrow(x, -1, torch.tensor(-1), 1)
    tensor([[3],
            [6],
            [9]])
""",
)
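
# Sketch of the shared-storage behavior noted above: writing through the
# narrowed view mutates the base tensor. The values are assumptions.
#
#     >>> base = torch.zeros(3, 3)
#     >>> view = torch.narrow(base, 0, 0, 2)
#     >>> _ = view.fill_(1.)
#     >>> base
#     tensor([[1., 1., 1.],
#             [1., 1., 1.],
#             [0., 0., 0.]])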
add_docstr(
    torch.narrow_copy,
    r"""
narrow_copy(input, dim, start, length, *, out=None) -> Tensor

Same as :meth:`Tensor.narrow` except this returns a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method.

Args:
    input (Tensor): the tensor to narrow
    dim (int): the dimension along which to narrow
    start (int): index of the element to start the narrowed dimension from. Can
        be negative, which means indexing from the end of `dim`
    length (int): length of the narrowed dimension, must be weakly positive

Keyword args:
    {out}

Example::

    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> torch.narrow_copy(x, 0, 0, 2)
    tensor([[ 1,  2,  3],
            [ 4,  5,  6]])
    >>> torch.narrow_copy(x, 1, 1, 2)
    tensor([[ 2,  3],
            [ 5,  6],
            [ 8,  9]])
    >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
    >>> torch.narrow_copy(s, 0, 0, 1)
    tensor(indices=tensor([[0, 0],
                           [0, 1]]),
           values=tensor([[[0, 1],
                           [2, 3]],

                          [[4, 5],
                           [6, 7]]]),
           size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)

.. seealso::
        :func:`torch.narrow` for a non-copy variant
""".format(
        **common_args
    ),
)

add_docstr(
    torch.nan_to_num,
    r"""
nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor

Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
greatest finite value representable by :attr:`input`'s dtype, and negative infinity
is replaced with the least finite value representable by :attr:`input`'s dtype.

Args:
    {input}
    nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
    posinf (Number, optional): if a Number, the value to replace positive infinity values with.
        If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
        Default is None.
    neginf (Number, optional): if a Number, the value to replace negative infinity values with.
        If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
        Default is None.

Keyword args:
    {out}

Example::

    >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
    >>> torch.nan_to_num(x)
    tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
    >>> torch.nan_to_num(x, nan=2.0)
    tensor([ 2.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
    >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
    tensor([ 2.0000e+00,  1.0000e+00, -3.4028e+38,  3.1400e+00])
""".format(
        **common_args
    ),
)
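
# Quick check of the default replacements described above: positive infinity
# maps to the dtype's largest finite value. The input is an assumption.
#
#     >>> x = torch.tensor([float('inf')])
#     >>> torch.nan_to_num(x) == torch.finfo(x.dtype).max
#     tensor([True])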
add_docstr(
    torch.ne,
    r"""
ne(input, other, *, out=None) -> Tensor

Computes :math:`\text{input} \neq \text{other}` element-wise.
"""
    + r"""

The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.

Args:
    input (Tensor): the tensor to compare
    other (Tensor or float): the tensor or value to compare

Keyword args:
    {out}

Returns:
    A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere

Example::

    >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[False, True], [True, False]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.not_equal,
    r"""
not_equal(input, other, *, out=None) -> Tensor

Alias for :func:`torch.ne`.
""",
)

add_docstr(
    torch.neg,
    r"""
neg(input, *, out=None) -> Tensor

Returns a new tensor with the negative of the elements of :attr:`input`.

.. math::
    \text{out} = -1 \times \text{input}
"""
    + r"""

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(5)
    >>> a
    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
    >>> torch.neg(a)
    tensor([-0.0090,  0.2262,  0.0682,  0.2866, -0.3940])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.negative,
    r"""
negative(input, *, out=None) -> Tensor

Alias for :func:`torch.neg`.
""",
)

add_docstr(
    torch.nextafter,
    r"""
nextafter(input, other, *, out=None) -> Tensor

Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.

The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.

Args:
    input (Tensor): the first input tensor
    other (Tensor): the second input tensor

Keyword args:
    {out}

Example::

    >>> eps = torch.finfo(torch.float32).eps
    >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
    tensor([True, True])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.nonzero,
    r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors

.. note::
    :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
    2-D tensor where each row is the index for a nonzero value.

    :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
    index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
    gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
    contains nonzero indices for a certain dimension.

    See below for more details on the two behaviors.

    When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
    host-device synchronization.

**When** :attr:`as_tuple` **is** ``False`` **(default)**:

Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).

If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.

**When** :attr:`as_tuple` **is** ``True``:

Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input`.

If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.

As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.

Args:
    {input}

Keyword args:
    out (LongTensor, optional): the output tensor containing indices

Returns:
    LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
    tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
    each dimension, containing the indices of each nonzero element along that
    dimension.

Example::

    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
    tensor([[ 0],
            [ 1],
            [ 2],
            [ 4]])
    >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
    ...                             [0.0, 0.4, 0.0, 0.0],
    ...                             [0.0, 0.0, 1.2, 0.0],
    ...                             [0.0, 0.0, 0.0,-0.4]]))
    tensor([[ 0,  0],
            [ 1,  1],
            [ 2,  2],
            [ 3,  3]])
    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
    (tensor([0, 1, 2, 4]),)
    >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
    ...                             [0.0, 0.4, 0.0, 0.0],
    ...                             [0.0, 0.0, 1.2, 0.0],
    ...                             [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
    (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
    >>> torch.nonzero(torch.tensor(5), as_tuple=True)
    (tensor([0]),)
""".format(
        **common_args
    ),
)
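
# Sketch of the advanced-indexing idiom mentioned in the nonzero note above;
# the matrix is an assumption for illustration.
#
#     >>> x = torch.tensor([[0, 4], [3, 0]])
#     >>> x[x.nonzero(as_tuple=True)]  # gathers every nonzero value
#     tensor([4, 3])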
add_docstr(
    torch.normal,
    r"""
normal(mean, std, *, generator=None, out=None) -> Tensor

Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.

The :attr:`mean` is a tensor with the mean of
each output element's normal distribution.

The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution.

The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.

.. note:: When the shapes do not match, the shape of :attr:`mean`
          is used as the shape for the returned output tensor.

.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
          its device with the CPU.

Args:
    mean (Tensor): the tensor of per-element means
    std (Tensor): the tensor of per-element standard deviations

Keyword args:
    {generator}
    {out}

Example::

    >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
    tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
              8.0505,   8.1408,   9.0563,  10.0566])

.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
   :noindex:

Similar to the function above, but the means are shared among all drawn
elements.

Args:
    mean (float, optional): the mean for all distributions
    std (Tensor): the tensor of per-element standard deviations

Keyword args:
    {out}

Example::

    >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
    tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])

.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
   :noindex:

Similar to the function above, but the standard deviations are shared among
all drawn elements.

Args:
    mean (Tensor): the tensor of per-element means
    std (float, optional): the standard deviation for all distributions

Keyword args:
    out (Tensor, optional): the output tensor

Example::

    >>> torch.normal(mean=torch.arange(1., 6.))
    tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])

.. function:: normal(mean, std, size, *, out=None) -> Tensor
   :noindex:

Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.

Args:
    mean (float): the mean for all distributions
    std (float): the standard deviation for all distributions
    size (int...): a sequence of integers defining the shape of the output tensor.

Keyword args:
    {out}

Example::

    >>> torch.normal(2, 3, size=(1, 4))
    tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
""".format(
        **common_args
    ),
)
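
# Sketch of the shape note above: when ``mean`` and ``std`` shapes differ but
# have equal numel, the output takes ``mean``'s shape. Shapes are assumptions.
#
#     >>> mean = torch.zeros(2, 2)
#     >>> std = torch.ones(4)
#     >>> torch.normal(mean, std).shape
#     torch.Size([2, 2])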
add_docstr(
    torch.numel,
    r"""
numel(input) -> int

Returns the total number of elements in the :attr:`input` tensor.

Args:
    {input}

Example::

    >>> a = torch.randn(1, 2, 3, 4, 5)
    >>> torch.numel(a)
    120
    >>> a = torch.zeros(4, 4)
    >>> torch.numel(a)
    16
""".format(
        **common_args
    ),
)

add_docstr(
    torch.ones,
    r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword arguments:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.ones(2, 3)
    tensor([[ 1.,  1.,  1.],
            [ 1.,  1.,  1.]])

    >>> torch.ones(5)
    tensor([ 1.,  1.,  1.,  1.,  1.])
""".format(
        **factory_common_args
    ),
)

add_docstr(
    torch.ones_like,
    r"""
ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`. ``torch.ones_like(input)`` is equivalent to
``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

.. warning::
    As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
    the old ``torch.ones_like(input, out=output)`` is equivalent to
    ``torch.ones(input.size(), out=output)``.

Args:
    {input}

Keyword arguments:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}

Example::

    >>> input = torch.empty(2, 3)
    >>> torch.ones_like(input)
    tensor([[ 1.,  1.,  1.],
            [ 1.,  1.,  1.]])
""".format(
        **factory_like_common_args
    ),
)

add_docstr(
    torch.orgqr,
    r"""
orgqr(input, tau) -> Tensor

Alias for :func:`torch.linalg.householder_product`.
""",
)

add_docstr(
    torch.ormqr,
    r"""
ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor

Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.

Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
where `Q` is represented using Householder reflectors `(input, tau)`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.

If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
It has size :math:`n \times n` otherwise.
If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.

Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.

.. seealso::
        :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
        from the QR decomposition.

.. note::
        This function supports backward but it is only fast when ``(input, tau)`` do not require gradients
        and/or ``tau.size(-1)`` is very small.

Args:
    input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
                    and `mn` equals to `m` or `n` depending on the :attr:`left`.
    tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
    other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
    left (bool): controls the order of multiplication.
    transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.

Keyword args:
    out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.

.. _Representation of Orthogonal or Unitary Matrices:
    https://www.netlib.org/lapack/lug/node128.html
""",
)
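
# Sketch tying together geqrf, householder_product, and ormqr, following the
# seealso above; a square matrix is assumed so the reduced and full Q agree.
#
#     >>> A = torch.randn(3, 3)
#     >>> h, tau = torch.geqrf(A)
#     >>> Q = torch.linalg.householder_product(h, tau)
#     >>> C = torch.randn(3, 2)
#     >>> torch.allclose(torch.ormqr(h, tau, C), Q @ C)
#     True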
add_docstr(
    torch.permute,
    r"""
permute(input, dims) -> Tensor

Returns a view of the original tensor :attr:`input` with its dimensions permuted.

Args:
    {input}
    dims (tuple of int): The desired ordering of dimensions

Example::

    >>> x = torch.randn(2, 3, 5)
    >>> x.size()
    torch.Size([2, 3, 5])
    >>> torch.permute(x, (2, 0, 1)).size()
    torch.Size([5, 2, 3])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.poisson,
    r"""
poisson(input, generator=None) -> Tensor

Returns a tensor of the same size as :attr:`input` with each element
sampled from a Poisson distribution with rate parameter given by the corresponding
element in :attr:`input` i.e.,

.. math::
    \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)

:attr:`input` must be non-negative.

Args:
    input (Tensor): the input tensor containing the rates of the Poisson distribution

Keyword args:
    {generator}

Example::

    >>> rates = torch.rand(4, 4) * 5  # rate parameter between 0 and 5
    >>> torch.poisson(rates)
    tensor([[9., 1., 3., 5.],
            [8., 6., 6., 0.],
            [0., 4., 5., 3.],
            [2., 1., 4., 2.]])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.polygamma,
    r"""
polygamma(n, input, *, out=None) -> Tensor

Alias for :func:`torch.special.polygamma`.
""",
)

add_docstr(
    torch.positive,
    r"""
positive(input) -> Tensor

Returns :attr:`input`.
Throws a runtime error if :attr:`input` is a bool tensor.
"""
    + r"""

Args:
    {input}

Example::

    >>> t = torch.randn(5)
    >>> t
    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
    >>> torch.positive(t)
    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.pow,
    r"""
pow(input, exponent, *, out=None) -> Tensor

Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.

:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.

When :attr:`exponent` is a scalar value, the operation applied is:

.. math::
    \text{out}_i = x_i ^ \text{exponent}

When :attr:`exponent` is a tensor, the operation applied is:

.. math::
    \text{out}_i = x_i ^ {\text{exponent}_i}
"""
    + r"""
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.

Args:
    {input}
    exponent (float or tensor): the exponent value

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.4331,  1.2475,  0.6834, -0.2791])
    >>> torch.pow(a, 2)
    tensor([ 0.1875,  1.5561,  0.4670,  0.0779])
    >>> exp = torch.arange(1., 5.)
    >>> a = torch.arange(1., 5.)
    >>> a
    tensor([ 1.,  2.,  3.,  4.])
    >>> exp
    tensor([ 1.,  2.,  3.,  4.])
    >>> torch.pow(a, exp)
    tensor([   1.,    4.,   27.,  256.])

.. function:: pow(self, exponent, *, out=None) -> Tensor
   :noindex:

:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.

The operation applied is:

.. math::
    \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}

Args:
    self (float): the scalar base value for the power operation
    exponent (Tensor): the exponent tensor

Keyword args:
    {out}

Example::

    >>> exp = torch.arange(1., 5.)
    >>> base = 2
    >>> torch.pow(base, exp)
    tensor([  2.,   4.,   8.,  16.])
""".format(
        **common_args
    ),
)

add_docstr(
    torch.float_power,
    r"""
float_power(input, exponent, *, out=None) -> Tensor

Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex returns a ``torch.float64`` tensor,
and if one or more inputs is complex returns a ``torch.complex128`` tensor.

.. note::
    This function always computes in double precision, unlike :func:`torch.pow`,
    which implements more typical :ref:`type promotion <type-promotion-doc>`.
    This is useful when the computation needs to be performed in a wider or more precise dtype,
    or the results of the computation may contain fractional values not representable in the input dtypes,
    like when an integer base is raised to a negative integer exponent.

Args:
    input (Tensor or Number): the base value(s)
    exponent (Tensor or Number): the exponent value(s)

Keyword args:
    {out}

Example::

    >>> a = torch.randint(10, (4,))
    >>> a
    tensor([6, 4, 7, 1])
    >>> torch.float_power(a, 2)
    tensor([36., 16., 49.,  1.], dtype=torch.float64)

    >>> a = torch.arange(1, 5)
    >>> a
    tensor([ 1,  2,  3,  4])
    >>> exp = torch.tensor([2, -3, 4, -5])
    >>> exp
    tensor([ 2, -3,  4, -5])
    >>> torch.float_power(a, exp)
    tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
""".format(
        **common_args
    ),
)
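
# Contrast sketch for the double-precision note above; the base and exponent
# are assumptions. torch.pow would reject a negative integer exponent on an
# integer base, while float_power computes the fractional result in float64.
#
#     >>> torch.float_power(torch.tensor(2), -2)
#     tensor(0.2500, dtype=torch.float64)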
add_docstr(
torch.prod,
r"""
prod(input, *, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
""".format(
**single_dim_common
),
)
add_docstr(
torch.promote_types,
r"""
promote_types(type1, type2) -> dtype
Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
not smaller nor of lower kind than either `type1` or `type2`. See type promotion
:ref:`documentation <type-promotion-doc>` for more information on the type
promotion logic.
Args:
type1 (:class:`torch.dtype`)
type2 (:class:`torch.dtype`)
Example::
>>> torch.promote_types(torch.int32, torch.float32)
torch.float32
>>> torch.promote_types(torch.uint8, torch.long)
torch.long
""",
)
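# Editor's illustrative sketch: promote_types picks the smallest dtype that is
# neither smaller nor of lower kind than either argument. Hypothetical helper;
# only ``torch`` is assumed.
def _promote_types_sketch():
    import torch

    assert torch.promote_types(torch.int32, torch.float32) == torch.float32
    assert torch.promote_types(torch.uint8, torch.long) == torch.long
    # bool is the lowest scalar kind, so the other dtype wins:
    assert torch.promote_types(torch.bool, torch.int16) == torch.int16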
add_docstr(
torch.qr,
r"""
qr(input, some=True, *, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
.. warning::
:func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
replaced with a string parameter :attr:`mode`.
``Q, R = torch.qr(A)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A)
``Q, R = torch.qr(A, some=False)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A, mode="complete")
.. warning::
If you plan to backpropagate through QR, note that the current backward implementation
is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
columns of :attr:`input` are linearly independent.
This behavior will probably change once QR supports pivoting.
.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
and may produce different (valid) decompositions on different device types
or different platforms.
Args:
input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
batch dimensions consisting of matrices of dimension :math:`m \times n`.
some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
complete QR decomposition. If `k = min(m, n)` then:
* ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default)
* ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
Keyword args:
out (tuple, optional): tuple of `Q` and `R` tensors.
The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
Example::
>>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> q, r = torch.qr(a)
>>> q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> r
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> torch.mm(q, r).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> torch.mm(q.t(), q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> a = torch.randn(3, 4, 5)
>>> q, r = torch.qr(a, some=False)
>>> torch.allclose(torch.matmul(q, r), a)
True
>>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
True
""",
)
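# Editor's illustrative sketch of the migration recommended in the warning
# above: torch.linalg.qr replaces the deprecated torch.qr. Hypothetical helper;
# only ``torch`` is assumed.
def _qr_migration_sketch():
    import torch

    A = torch.randn(4, 3)
    q, r = torch.linalg.qr(A)                     # reduced; replaces torch.qr(A)
    qc, rc = torch.linalg.qr(A, mode="complete")  # replaces torch.qr(A, some=False)
    assert q.shape == (4, 3) and qc.shape == (4, 4)
    assert torch.allclose(qc @ rc, A, atol=1e-6)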
add_docstr(
torch.rad2deg,
r"""
rad2deg(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in radians to degrees.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
>>> torch.rad2deg(a)
tensor([[ 180.0233, -180.0233],
[ 359.9894, -359.9894],
[ 89.9544, -89.9544]])
""".format(
**common_args
),
)
add_docstr(
torch.deg2rad,
r"""
deg2rad(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in degrees to radians.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
>>> torch.deg2rad(a)
tensor([[ 3.1416, -3.1416],
[ 6.2832, -6.2832],
[ 1.5708, -1.5708]])
""".format(
**common_args
),
)
add_docstr(
torch.heaviside,
r"""
heaviside(input, values, *, out=None) -> Tensor
Computes the Heaviside step function for each element in :attr:`input`.
The Heaviside step function is defined as:
.. math::
\text{heaviside}(input, values) = \begin{cases}
0, & \text{if input < 0}\\
values, & \text{if input == 0}\\
1, & \text{if input > 0}
\end{cases}
"""
+ r"""
Args:
{input}
values (Tensor): The values to use where :attr:`input` is zero.
Keyword arguments:
{out}
Example::
>>> input = torch.tensor([-1.5, 0, 2.0])
>>> values = torch.tensor([0.5])
>>> torch.heaviside(input, values)
tensor([0.0000, 0.5000, 1.0000])
>>> values = torch.tensor([1.2, -2.0, 3.5])
>>> torch.heaviside(input, values)
tensor([0., -2., 1.])
""".format(
**common_args
),
)
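# Editor's illustrative sketch: the piecewise definition above can be spelled
# with torch.where, which makes the role of ``values`` explicit. Hypothetical
# helper; only ``torch`` is assumed.
def _heaviside_sketch():
    import torch

    x = torch.tensor([-1.5, 0.0, 2.0])
    v = torch.tensor([0.5])
    # 0 where x < 0, values where x == 0, 1 where x > 0:
    ref = torch.where(x == 0, v, (x > 0).to(x.dtype))
    assert torch.equal(torch.heaviside(x, v), ref)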
add_docstr(
torch.rand,
"""
rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \
requires_grad=False, pin_memory=False) -> Tensor
"""
+ r"""
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`.
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.rand_like,
r"""
rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a uniform distribution on the interval :math:`[0, 1)`.
``torch.rand_like(input)`` is equivalent to
``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.randint,
"""
randint(low=0, high, size, \\*, generator=None, out=None, \
dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
{generator}
{out}
dtype (`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
{layout}
{device}
{requires_grad}
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
""".format(
**factory_common_args
),
)
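# Editor's illustrative sketch: passing an explicit generator makes randint
# reproducible, and the default dtype is int64 as noted above. Hypothetical
# helper; only ``torch`` is assumed.
def _randint_generator_sketch():
    import torch

    g = torch.Generator().manual_seed(0)
    a = torch.randint(0, 10, (3,), generator=g)
    g.manual_seed(0)  # rewind the generator to replay the same draws
    b = torch.randint(0, 10, (3,), generator=g)
    assert torch.equal(a, b) and a.dtype == torch.int64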
add_docstr(
torch.randint_like,
"""
randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
{input}
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.randn,
"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
pin_memory=False) -> Tensor
"""
+ r"""
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
unit variance as
.. math::
\text{{out}}_{{i}} \sim \mathcal{{CN}}(0, 1)
This is equivalent to separately sampling the real :math:`(\operatorname{{Re}})` and imaginary
:math:`(\operatorname{{Im}})` part of :math:`\text{{out}}_i` as
.. math::
\operatorname{{Re}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}}),\quad
\operatorname{{Im}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}})
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
""".format(
**factory_common_args
),
)
add_docstr(
torch.randn_like,
r"""
randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.randperm,
"""
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
device=None, requires_grad=False, pin_memory=False) -> Tensor
"""
+ r"""
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Keyword args:
{generator}
{out}
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: ``torch.int64``.
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> torch.randperm(4)
tensor([2, 1, 0, 3])
""".format(
**factory_common_args
),
)
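# Editor's illustrative sketch: a common use of randperm is shuffling the
# leading dimension of a tensor via advanced indexing. Hypothetical helper;
# only ``torch`` is assumed.
def _randperm_shuffle_sketch():
    import torch

    x = torch.arange(12).reshape(4, 3)
    perm = torch.randperm(x.size(0))  # random row order
    shuffled = x[perm]
    assert shuffled.shape == x.shape
    return shuffled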
add_docstr(
torch.tensor,
r"""
tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
.. warning::
When working with tensors prefer using :func:`torch.Tensor.clone`,
:func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
is equivalent to ``t.clone().detach().requires_grad_(True)``.
.. seealso::
:func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
:func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
Args:
{data}
Keyword args:
{dtype}
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the current device.
{requires_grad}
{pin_memory}
Example::
>>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
tensor([[ 0.1000, 1.2000],
[ 2.2000, 3.1000],
[ 4.9000, 5.2000]])
>>> torch.tensor([0, 1]) # Type inference on data
tensor([ 0, 1])
>>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
... dtype=torch.float64,
... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
>>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
tensor(3.1416)
>>> torch.tensor([]) # Create an empty tensor (of size (0,))
tensor([])
""".format(
**factory_data_common_args
),
)
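# Editor's illustrative sketch of the copying behavior warned about above:
# torch.tensor(data) always copies, and t.clone().detach() is the preferred
# spelling when ``data`` is already a tensor. Hypothetical helper.
def _tensor_copy_sketch():
    import torch

    t = torch.ones(3, requires_grad=True)
    a = t.clone().detach()               # same values, no autograd history
    assert not a.requires_grad
    assert a.data_ptr() != t.data_ptr()  # storage was copied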
add_docstr(
torch.range,
r"""
range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\text{out}_{i+1} = \text{out}_i + \text{step}.
"""
+ r"""
.. warning::
This function is deprecated and will be removed in a future release because its behavior is inconsistent with
Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
Args:
start (float): the starting value for the set of points. Default: ``0``.
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.range(1, 4)
tensor([ 1., 2., 3., 4.])
>>> torch.range(1, 4, 0.5)
tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
""".format(
**factory_common_args
),
)
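# Editor's illustrative sketch: unlike Python's range builtin and torch.arange,
# torch.range includes the endpoint, which is the inconsistency behind its
# deprecation (calling it emits a deprecation warning). Hypothetical helper.
def _range_vs_arange_sketch():
    import torch

    inclusive = torch.range(1, 4)   # tensor([1., 2., 3., 4.]), endpoint kept
    exclusive = torch.arange(1, 5)  # tensor([1, 2, 3, 4]), endpoint dropped
    assert len(inclusive) == len(exclusive) == 4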
add_docstr(
torch.arange,
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
in such cases.
.. math::
\text{out}_{i+1} = \text{out}_{i} + \text{step}
"""
+ r"""
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
""".format(
**factory_common_args
),
)
add_docstr(
torch.ravel,
r"""
ravel(input) -> Tensor
Return a contiguous flattened tensor. A copy is made only if needed.
Args:
{input}
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.ravel(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
""".format(
**common_args
),
)
add_docstr(
torch.remainder,
r"""
remainder(input, other, *, out=None) -> Tensor
Computes
`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
is less than that of :attr:`other`.
It may also be defined in terms of :func:`torch.div` as
.. code:: python
torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
See :func:`torch.fmod` for how division by zero is handled.
.. seealso::
:func:`torch.fmod`, which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
That function is defined in terms of division rounding towards zero.
Args:
input (Tensor or Scalar): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
{out}
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
""".format(
**common_args
),
)
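# Editor's illustrative sketch: the result of torch.remainder follows the
# divisor's sign and satisfies the floor-division identity quoted above,
# while torch.fmod follows the dividend's sign. Hypothetical helper.
def _remainder_vs_fmod_sketch():
    import torch

    a = torch.tensor([-3.0, -2.0, 3.0])
    b = torch.tensor(2.0)
    assert torch.allclose(torch.remainder(a, b),
                          a - a.div(b, rounding_mode="floor") * b)
    # fmod keeps the dividend's sign instead:
    assert torch.equal(torch.fmod(a, b), torch.tensor([-1.0, 0.0, 1.0]))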
add_docstr(
torch.renorm,
r"""
renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`.
.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
Args:
{input}
p (float): the power for the norm computation
dim (int): the dimension to slice over to get the sub-tensors
maxnorm (float): the maximum norm to keep each sub-tensor under
Keyword args:
{out}
Example::
>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
tensor([ 2., 2., 2.])
>>> x[2].fill_(3)
tensor([ 3., 3., 3.])
>>> x
tensor([[ 1., 1., 1.],
[ 2., 2., 2.],
[ 3., 3., 3.]])
>>> torch.renorm(x, 1, 0, 5)
tensor([[ 1.0000, 1.0000, 1.0000],
[ 1.6667, 1.6667, 1.6667],
[ 1.6667, 1.6667, 1.6667]])
""".format(
**common_args
),
)
add_docstr(
torch.reshape,
r"""
reshape(input, shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`input`,
but with the specified shape. When possible, the returned tensor will be a view
of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.
See :meth:`torch.Tensor.view` on when it is possible to return a view.
A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`input`.
Args:
input (Tensor): the tensor to be reshaped
shape (tuple of int): the new shape
Example::
>>> a = torch.arange(4.)
>>> torch.reshape(a, (2, 2))
tensor([[ 0., 1.],
[ 2., 3.]])
>>> b = torch.tensor([[0, 1], [2, 3]])
>>> torch.reshape(b, (-1,))
tensor([ 0, 1, 2, 3])
""",
)
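# Editor's illustrative sketch: whether reshape returns a view or a copy can
# be observed through data_ptr(), though, as noted above, callers should not
# rely on it. Hypothetical helper; only ``torch`` is assumed.
def _reshape_view_or_copy_sketch():
    import torch

    a = torch.arange(6)
    v = torch.reshape(a, (2, 3))
    assert v.data_ptr() == a.data_ptr()  # contiguous input: a view
    t = a.reshape(2, 3).t()              # non-contiguous strides
    c = t.reshape(-1)
    assert c.data_ptr() != t.data_ptr()  # had to copy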
add_docstr(
torch.result_type,
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
""",
)
add_docstr(
torch.row_stack,
r"""
row_stack(tensors, *, out=None) -> Tensor
Alias of :func:`torch.vstack`.
""",
)
add_docstr(
torch.round,
r"""
round(input, *, decimals=0, out=None) -> Tensor
Rounds elements of :attr:`input` to the nearest integer.
For integer inputs, follows the array-api convention of returning a
copy of the input tensor.
The dtype of the output tensor is the same as the input's dtype.
.. note::
This function implements the "round half to even" rule to
break ties when a number is equidistant from two
integers (e.g. `round(2.5)` is 2).
When the :attr:`decimals` argument is specified the
algorithm used is similar to NumPy's `around`. This
algorithm is fast but inexact and it can easily
overflow for low precision dtypes.
E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
.. seealso::
:func:`torch.ceil`, which rounds up.
:func:`torch.floor`, which rounds down.
:func:`torch.trunc`, which rounds towards zero.
Args:
{input}
decimals (int): Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions
to the left of the decimal point.
Keyword args:
{out}
Example::
>>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
tensor([ 5., -2., 9., -8.])
>>> # Values equidistant from two integers are rounded towards the
>>> # nearest even value (zero is treated as even)
>>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
tensor([-0., 0., 2., 2.])
>>> # A positive decimals argument rounds to that decimal place
>>> torch.round(torch.tensor([0.1234567]), decimals=3)
tensor([0.1230])
>>> # A negative decimals argument rounds to the left of the decimal
>>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
tensor([1000.])
""".format(
**common_args
),
)
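# Editor's illustrative sketch of the "round half to even" tie-breaking and
# the negative-decimals behavior described above. Hypothetical helper; only
# ``torch`` is assumed.
def _round_half_even_sketch():
    import torch

    ties = torch.tensor([0.5, 1.5, 2.5])
    assert torch.equal(torch.round(ties), torch.tensor([0.0, 2.0, 2.0]))
    # decimals=-2 rounds to the hundreds place:
    assert torch.allclose(torch.round(torch.tensor([1234.5678]), decimals=-2),
                          torch.tensor([1200.0]))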
add_docstr(
torch.rsqrt,
r"""
rsqrt(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.0370, 0.2970, 1.5420, -0.9105])
>>> torch.rsqrt(a)
tensor([ nan, 1.8351, 0.8053, nan])
""".format(
**common_args
),
)
add_docstr(
torch.scatter,
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
""",
)
add_docstr(
torch.scatter_add,
r"""
scatter_add(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
""",
)
add_docstr(
torch.scatter_reduce,
r"""
scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
""",
)
add_docstr(
torch.select,
r"""
select(input, dim, index) -> Tensor
Slices the :attr:`input` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.
.. note:: If :attr:`input` is a sparse tensor and returning a view of
the tensor is not possible, a RuntimeError exception is
raised. If this is the case, consider using the
:func:`torch.select_copy` function.
Args:
{input}
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
""".format(
**common_args
),
)
add_docstr(
torch.select_scatter,
r"""
select_scatter(input, src, dim, index) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
This function returns a tensor with fresh storage; it does not create a view.
Args:
{input}
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into.
index (int): the index to select with
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.select(input, dim, index)``
Example::
>>> a = torch.zeros(2, 2)
>>> b = torch.ones(2)
>>> a.select_scatter(b, 0, 0)
tensor([[1., 1.],
[0., 0.]])
""".format(
**common_args
),
)
add_docstr(
torch.slice_scatter,
r"""
slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
dimension.
This function returns a tensor with fresh storage; it does not create a view.
Args:
{input}
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into
start (Optional[int]): the start index of where to insert the slice
end (Optional[int]): the end index of where to insert the slice
step (int): how many elements to skip between inserted values
Example::
>>> a = torch.zeros(8, 8)
>>> b = torch.ones(2, 8)
>>> a.slice_scatter(b, start=6)
tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.]])
>>> b = torch.ones(8, 2)
>>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.]])
""".format(
**common_args
),
)
add_docstr(
torch.set_flush_denormal,
r"""
set_flush_denormal(mode) -> bool
Enables or disables flushing of denormal floating-point numbers to zero on CPU.
Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is supported on x86 architectures that support SSE3, and on the AArch64 architecture.
Args:
mode (bool): Controls whether to enable flush denormal mode or not
Example::
>>> torch.set_flush_denormal(True)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor([ 0.], dtype=torch.float64)
>>> torch.set_flush_denormal(False)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor(9.88131e-324 *
[ 1.0000], dtype=torch.float64)
""",
)
add_docstr(
torch.set_num_threads,
r"""
set_num_threads(int)
Sets the number of threads used for intraop parallelism on CPU.
.. warning::
To ensure that the correct number of threads is used, set_num_threads
must be called before running eager, JIT or autograd code.
""",
)
add_docstr(
torch.set_num_interop_threads,
r"""
set_num_interop_threads(int)
Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.
.. warning::
Can only be called once and before any inter-op parallel work
is started (e.g. JIT execution).
""",
)
add_docstr(
torch.sigmoid,
r"""
sigmoid(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expit`.
""",
)
add_docstr(
torch.logit,
r"""
logit(input, eps=None, *, out=None) -> Tensor
Alias for :func:`torch.special.logit`.
""",
)
add_docstr(
torch.sign,
r"""
sign(input, *, out=None) -> Tensor
Returns a new tensor with the signs of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> a
tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
>>> torch.sign(a)
tensor([ 1., -1., 0., 1.])
""".format(
**common_args
),
)
add_docstr(
torch.signbit,
r"""
signbit(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` has its sign bit set or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> torch.signbit(a)
tensor([ False, True, False, False])
>>> a = torch.tensor([-0.0, 0.0])
>>> torch.signbit(a)
tensor([ True, False])
.. note::
signbit handles signed zeros, so negative zero (-0) returns True.
""".format(
**common_args
),
)
add_docstr(
torch.sgn,
r"""
sgn(input, *, out=None) -> Tensor
This function is an extension of :func:`torch.sign` to complex tensors.
For complex tensors it computes a new tensor whose elements have
the same angles as the corresponding elements of :attr:`input` and
absolute values (i.e. magnitudes) of one; for non-complex tensors
it is equivalent to :func:`torch.sign`.
.. math::
\text{out}_{i} = \begin{cases}
0 & |\text{input}_i| == 0 \\
\frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
\end{cases}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
>>> t.sgn()
tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
""".format(
**common_args
),
)
add_docstr(
torch.sin,
r"""
sin(input, *, out=None) -> Tensor
Returns a new tensor with the sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5461, 0.1347, -2.7266, -0.2746])
>>> torch.sin(a)
tensor([-0.5194, 0.1343, -0.4032, -0.2711])
""".format(
**common_args
),
)
add_docstr(
torch.sinc,
r"""
sinc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.sinc`.
""",
)
add_docstr(
torch.sinh,
r"""
sinh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \sinh(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
>>> torch.sinh(a)
tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.sinh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(
**common_args
),
)
add_docstr(
torch.sort,
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
""".format(
**common_args
),
)
add_docstr(
torch.argsort,
r"""
argsort(input, dim=-1, descending=False, stable=False) -> Tensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements. If ``False``, the relative order of values
which compare equal is not guaranteed. ``True`` is slower.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): controls the relative order of equivalent elements
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
""".format(
**common_args
),
)
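# Editor's illustrative sketch: argsort returns the same indices as
# torch.sort, so gathering with them reproduces the sorted values (ties are
# effectively absent for random floats). Hypothetical helper.
def _argsort_gather_sketch():
    import torch

    a = torch.randn(3, 4)
    idx = torch.argsort(a, dim=1)
    gathered = torch.gather(a, 1, idx)
    assert torch.equal(gathered, torch.sort(a, dim=1).values)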
add_docstr(
torch.msort,
r"""
msort(input, *, out=None) -> Tensor
Sorts the elements of the :attr:`input` tensor along its first dimension
in ascending order by value.
.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
See also :func:`torch.sort`.
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.randn(3, 4)
>>> t
tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
[-2.0527, -1.1250, 0.2275, 0.3077],
[-0.0881, -0.1259, -0.5495, 1.0284]])
>>> torch.msort(t)
tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
[-0.1321, -0.1259, -0.5495, 0.3077],
[-0.0881, 0.4370, 0.2275, 1.0284]])
""".format(
**common_args
),
)
add_docstr(
torch.sparse_compressed_tensor,
r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """
r"""*, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
matrix multiplication operations in Compressed Sparse format are
typically faster than that for sparse tensors in COO format. Make sure
you have a look at :ref:`the note on the data type of the indices
<sparse-compressed-docs>`.
{sparse_factory_device_note}
Args:
compressed_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, compressed_dim_size + 1)``. The last element of
each batch is the number of non-zero elements or blocks. This
tensor encodes the index in ``values`` and ``plain_indices``
depending on where the given compressed dimension (row or
column) starts. Each successive number in the tensor
subtracted by the number before it denotes the number of
elements or blocks in a given compressed dimension.
plain_indices (array_like): Plain dimension (column or row)
co-ordinates of each element or block in values. (B+1)-dimensional
tensor with the same length as values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1+K)-dimensional (for CSR and CSC layouts) or
(1+2+K)-dimensional tensor (for BSR and BSC layouts) where
``K`` is the number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize[0] ==
blocksize[1] == 1`` for CSR and CSC formats. If not provided,
the size will be inferred as the minimum size big enough to
hold all non-zero elements or blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
layout (:class:`torch.layout`, required): the desired layout of
returned tensor: :attr:`torch.sparse_csr`,
:attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
:attr:`torch.sparse_bsc`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
{check_invariants}
Example::
>>> compressed_indices = [0, 2, 4]
>>> plain_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
... torch.tensor(plain_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
""".format(
**factory_common_args
),
)
add_docstr(
torch.sparse_csr_tensor,
r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """
r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
in CSR format are typically faster than that for sparse tensors in COO format. Make sure you have a look
at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
{sparse_factory_device_note}
Args:
crow_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, nrows + 1)``. The last element of each batch
is the number of non-zeros. This tensor encodes the index in
values and col_indices depending on where the given row
starts. Each successive number in the tensor subtracted by the
number before it denotes the number of elements in a given
row.
col_indices (array_like): Column co-ordinates of each element in
values. (B+1)-dimensional tensor with the same length
as values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1+K)-dimensional tensor where ``K`` is the number
of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
not provided, the size will be inferred as the minimum size
big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
{check_invariants}
Example::
>>> crow_indices = [0, 2, 4]
>>> col_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
""".format(
**factory_common_args
),
)
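# Editor's illustrative sketch: the CSR layout above can also be produced from
# a dense tensor with Tensor.to_sparse_csr, which makes the meaning of
# crow_indices/col_indices easy to inspect. Hypothetical helper.
def _csr_roundtrip_sketch():
    import torch

    dense = torch.tensor([[1.0, 0.0], [0.0, 2.0]])
    csr = dense.to_sparse_csr()
    assert torch.equal(csr.crow_indices(), torch.tensor([0, 1, 2]))
    assert torch.equal(csr.col_indices(), torch.tensor([0, 1]))
    assert torch.allclose(csr.to_dense(), dense)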
add_docstr(
torch.sparse_csc_tensor,
r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """
r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
<sparse-csc-docs>` with specified values at the given
:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in CSC format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-csc-docs>`.
{sparse_factory_device_note}
Args:
ccol_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, ncols + 1)``. The last element of each batch
is the number of non-zeros. This tensor encodes the index in
values and row_indices depending on where the given column
starts. Each successive number in the tensor subtracted by the
number before it denotes the number of elements in a given
column.
row_indices (array_like): Row co-ordinates of each element in
values. (B+1)-dimensional tensor with the same length as
values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1+K)-dimensional tensor where ``K`` is the number
of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
not provided, the size will be inferred as the minimum size
big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
{check_invariants}
Example::
>>> ccol_indices = [0, 2, 4]
>>> row_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
... torch.tensor(row_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(ccol_indices=tensor([0, 2, 4]),
row_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csc)
""".format(
**factory_common_args
),
)
add_docstr(
torch.sparse_bsr_tensor,
r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """
r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
<sparse-bsr-docs>` with specified 2-dimensional blocks at the given
:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
multiplication operations in BSR format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsr-docs>`.
{sparse_factory_device_note}
Args:
crow_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, nrowblocks + 1)``. The last element of each
batch is the number of non-zeros. This tensor encodes the
block index in values and col_indices depending on where the
given row block starts. Each successive number in the tensor
subtracted by the number before it denotes the number of
blocks in a given row.
col_indices (array_like): Column block co-ordinates of each block
in values. (B+1)-dimensional tensor with the same length as
values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, and other types that
represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize ==
values.shape[1:3]``. If not provided, the size will be
inferred as the minimum size big enough to hold all non-zero
blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_device`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
{check_invariants}
Example::
>>> crow_indices = [0, 1, 2]
>>> col_indices = [0, 1]
>>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 1, 2]),
col_indices=tensor([0, 1]),
values=tensor([[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
layout=torch.sparse_bsr)
""".format(
**factory_common_args
),
)
  8333. add_docstr(
  8334. torch.sparse_bsc_tensor,
  8335. r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """
  8336. r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
  8337. Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
  8338. Column)) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
  8339. given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
  8340. multiplication operations in BSC format are typically faster than that
  8341. for sparse tensors in COO format. Make you have a look at :ref:`the
  8342. note on the data type of the indices <sparse-bsc-docs>`.
  8343. {sparse_factory_device_note}
  8344. Args:
  8345. ccol_indices (array_like): (B+1)-dimensional array of size
  8346. ``(*batchsize, ncolblocks + 1)``. The last element of each
  8347. batch is the number of non-zeros. This tensor encodes the
  8348. index in values and row_indices depending on where the given
  8349. column starts. Each successive number in the tensor subtracted
  8350. by the number before it denotes the number of elements in a
  8351. given column.
row_indices (array_like): Row block coordinates of each block in
values. (B+1)-dimensional tensor with the same length
as values.
values (array_like): Initial blocks for the tensor. Can be a list,
tuple, NumPy ``ndarray``, and other types that
represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize ==
values.shape[1:3]``. If not provided, the size will be
inferred as the minimum size big enough to hold all non-zero
blocks.
  8364. Keyword args:
  8365. dtype (:class:`torch.dtype`, optional): the desired data type of
  8366. returned tensor. Default: if None, infers data type from
  8367. :attr:`values`.
  8368. device (:class:`torch.device`, optional): the desired device of
  8369. returned tensor. Default: if None, uses the current device
  8370. for the default tensor type (see
  8371. :func:`torch.set_default_device`). :attr:`device` will be
  8372. the CPU for CPU tensor types and the current CUDA device for
  8373. CUDA tensor types.
  8374. {requires_grad}
  8375. {check_invariants}
  8376. Example::
  8377. >>> ccol_indices = [0, 1, 2]
  8378. >>> row_indices = [0, 1]
  8379. >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
  8380. >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
  8381. ... torch.tensor(row_indices, dtype=torch.int64),
  8382. ... torch.tensor(values), dtype=torch.double)
  8383. tensor(ccol_indices=tensor([0, 1, 2]),
  8384. row_indices=tensor([0, 1]),
  8385. values=tensor([[[1., 2.],
  8386. [3., 4.]],
  8387. [[5., 6.],
  8388. [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
  8389. layout=torch.sparse_bsc)
  8390. """.format(
  8391. **factory_common_args
  8392. ),
  8393. )
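# Editor's sketch (hypothetical helper, not part of the original file): a
# quick numeric check of the compressed-index semantics described in the
# sparse_bsr_tensor / sparse_bsc_tensor docstrings above.
def _sketch_compressed_index_semantics():
    import torch

    crow_indices = torch.tensor([0, 1, 2])  # from the BSR example above
    # The difference between consecutive entries is the number of blocks
    # stored for each row.
    assert torch.diff(crow_indices).tolist() == [1, 1]
    # The last entry is the total number of non-zero blocks.
    assert crow_indices[-1].item() == 2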
  8394. add_docstr(
  8395. torch.sparse_coo_tensor,
  8396. r"""sparse_coo_tensor(indices, values, size=None, """
  8397. r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
  8398. Constructs a :ref:`sparse tensor in COO(rdinate) format
  8399. <sparse-coo-docs>` with specified values at the given
  8400. :attr:`indices`.
  8401. .. note::
  8402. This function returns an :ref:`uncoalesced tensor
  8403. <sparse-uncoalesced-coo-docs>` when :attr:`is_coalesced` is
  8404. unspecified or ``None``.
  8405. {sparse_factory_device_note}
  8406. Args:
  8407. indices (array_like): Initial data for the tensor. Can be a list, tuple,
  8408. NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
  8409. internally. The indices are the coordinates of the non-zero values in the matrix, and thus
  8410. should be two-dimensional where the first dimension is the number of tensor dimensions and
  8411. the second dimension is the number of non-zero values.
  8412. values (array_like): Initial values for the tensor. Can be a list, tuple,
  8413. NumPy ``ndarray``, scalar, and other types.
  8414. size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
  8415. provided the size will be inferred as the minimum size big enough to hold all non-zero
  8416. elements.
  8417. Keyword args:
  8418. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  8419. Default: if None, infers data type from :attr:`values`.
  8420. device (:class:`torch.device`, optional): the desired device of returned tensor.
  8421. Default: if None, uses the current device for the default tensor type
  8422. (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
  8423. for CPU tensor types and the current CUDA device for CUDA tensor types.
  8424. {requires_grad}
  8425. {check_invariants}
is_coalesced (bool, optional): When ``True``, the caller is
responsible for providing tensor indices that correspond to a
coalesced tensor. If the :attr:`check_invariants` flag is
``False``, no error will be raised if the prerequisites are not
met and this will lead to silently incorrect results. To force
coalescing, please use :meth:`coalesce` on the resulting
Tensor.
Default: ``None``; except for trivial cases (e.g. ``nnz < 2``) the
resulting Tensor has :attr:`is_coalesced` set to ``False``.
  8435. Example::
  8436. >>> i = torch.tensor([[0, 1, 1],
  8437. ... [2, 0, 2]])
  8438. >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
  8439. >>> torch.sparse_coo_tensor(i, v, [2, 4])
  8440. tensor(indices=tensor([[0, 1, 1],
  8441. [2, 0, 2]]),
  8442. values=tensor([3., 4., 5.]),
  8443. size=(2, 4), nnz=3, layout=torch.sparse_coo)
  8444. >>> torch.sparse_coo_tensor(i, v) # Shape inference
  8445. tensor(indices=tensor([[0, 1, 1],
  8446. [2, 0, 2]]),
  8447. values=tensor([3., 4., 5.]),
  8448. size=(2, 3), nnz=3, layout=torch.sparse_coo)
  8449. >>> torch.sparse_coo_tensor(i, v, [2, 4],
  8450. ... dtype=torch.float64,
  8451. ... device=torch.device('cuda:0'))
  8452. tensor(indices=tensor([[0, 1, 1],
  8453. [2, 0, 2]]),
  8454. values=tensor([3., 4., 5.]),
  8455. device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
  8456. layout=torch.sparse_coo)
  8457. # Create an empty sparse tensor with the following invariants:
  8458. # 1. sparse_dim + dense_dim = len(SparseTensor.shape)
  8459. # 2. SparseTensor._indices().shape = (sparse_dim, nnz)
  8460. # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
  8461. #
  8462. # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
  8463. # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
  8464. >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
  8465. tensor(indices=tensor([], size=(1, 0)),
  8466. values=tensor([], size=(0,)),
  8467. size=(1,), nnz=0, layout=torch.sparse_coo)
  8468. # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
  8469. # sparse_dim = 1
  8470. >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
  8471. tensor(indices=tensor([], size=(1, 0)),
  8472. values=tensor([], size=(0, 2)),
  8473. size=(1, 2), nnz=0, layout=torch.sparse_coo)
  8474. .. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
  8475. """.format(
  8476. **factory_common_args
  8477. ),
  8478. )
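# Editor's sketch (hypothetical helper, not part of the original file):
# duplicate COO entries survive construction and are only summed by
# .coalesce(), matching the is_coalesced notes above.
def _sketch_coalesce_behavior():
    import torch

    i = torch.tensor([[0, 0], [1, 1]])  # the entry (0, 1) appears twice
    v = torch.tensor([1.0, 2.0])
    s = torch.sparse_coo_tensor(i, v, [2, 2])
    assert not s.is_coalesced()
    c = s.coalesce()  # duplicates are summed: 1.0 + 2.0 -> 3.0
    assert c.is_coalesced()
    assert c.values().tolist() == [3.0]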
  8479. add_docstr(
  8480. torch.sqrt,
  8481. r"""
  8482. sqrt(input, *, out=None) -> Tensor
  8483. Returns a new tensor with the square-root of the elements of :attr:`input`.
  8484. .. math::
  8485. \text{out}_{i} = \sqrt{\text{input}_{i}}
  8486. """
  8487. + r"""
  8488. Args:
  8489. {input}
  8490. Keyword args:
  8491. {out}
  8492. Example::
  8493. >>> a = torch.randn(4)
  8494. >>> a
  8495. tensor([-2.0755, 1.0226, 0.0831, 0.4806])
  8496. >>> torch.sqrt(a)
  8497. tensor([ nan, 1.0112, 0.2883, 0.6933])
  8498. """.format(
  8499. **common_args
  8500. ),
  8501. )
  8502. add_docstr(
  8503. torch.square,
  8504. r"""
  8505. square(input, *, out=None) -> Tensor
  8506. Returns a new tensor with the square of the elements of :attr:`input`.
  8507. Args:
  8508. {input}
  8509. Keyword args:
  8510. {out}
  8511. Example::
  8512. >>> a = torch.randn(4)
  8513. >>> a
  8514. tensor([-2.0755, 1.0226, 0.0831, 0.4806])
  8515. >>> torch.square(a)
  8516. tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
  8517. """.format(
  8518. **common_args
  8519. ),
  8520. )
  8521. add_docstr(
  8522. torch.squeeze,
  8523. r"""
  8524. squeeze(input, dim=None) -> Tensor
  8525. Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then `input.squeeze()`
will be of shape: :math:`(A \times B \times C \times D)`.
  8529. When :attr:`dim` is given, a squeeze operation is done only in the given
  8530. dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
  8531. ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
  8532. will squeeze the tensor to the shape :math:`(A \times B)`.
  8533. .. note:: The returned tensor shares the storage with the input tensor,
  8534. so changing the contents of one will change the contents of the other.
  8535. .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
  8536. will also remove the batch dimension, which can lead to unexpected
  8537. errors. Consider specifying only the dims you wish to be squeezed.
  8538. Args:
  8539. {input}
  8540. dim (int or tuple of ints, optional): if given, the input will be squeezed
  8541. only in the specified dimensions.
  8542. .. versionchanged:: 2.0
  8543. :attr:`dim` now accepts tuples of dimensions.
  8544. Example::
  8545. >>> x = torch.zeros(2, 1, 2, 1, 2)
  8546. >>> x.size()
  8547. torch.Size([2, 1, 2, 1, 2])
  8548. >>> y = torch.squeeze(x)
  8549. >>> y.size()
  8550. torch.Size([2, 2, 2])
  8551. >>> y = torch.squeeze(x, 0)
  8552. >>> y.size()
  8553. torch.Size([2, 1, 2, 1, 2])
  8554. >>> y = torch.squeeze(x, 1)
  8555. >>> y.size()
  8556. torch.Size([2, 2, 1, 2])
>>> y = torch.squeeze(x, (1, 2, 3))
>>> y.size()
torch.Size([2, 2, 2])
  8559. """.format(
  8560. **common_args
  8561. ),
  8562. )
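# Editor's sketch (hypothetical helper, not part of the original file):
# demonstrates the warning in the squeeze docstring above -- squeezing all
# size-1 dimensions can silently drop a batch dimension of size 1.
def _sketch_squeeze_batch_dim_pitfall():
    import torch

    x = torch.randn(1, 1, 3)  # batch of one sample, one size-1 channel dim
    assert torch.squeeze(x).shape == (3,)  # the batch dimension is gone too
    assert torch.squeeze(x, 1).shape == (1, 3)  # only the intended dim removed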
  8563. add_docstr(
  8564. torch.std,
  8565. r"""
  8566. std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
  8567. Calculates the standard deviation over the dimensions specified by :attr:`dim`.
  8568. :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
  8569. reduce over all dimensions.
  8570. The standard deviation (:math:`\sigma`) is calculated as
  8571. .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
  8572. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  8573. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  8574. the :attr:`correction`.
  8575. """
  8576. + r"""
  8577. {keepdim_details}
  8578. Args:
  8579. {input}
  8580. {dim}
  8581. Keyword args:
  8582. correction (int): difference between the sample size and sample degrees of freedom.
  8583. Defaults to `Bessel's correction`_, ``correction=1``.
  8584. .. versionchanged:: 2.0
  8585. Previously this argument was called ``unbiased`` and was a boolean
  8586. with ``True`` corresponding to ``correction=1`` and ``False`` being
  8587. ``correction=0``.
  8588. {keepdim}
  8589. {out}
Example::
  8591. >>> a = torch.tensor(
  8592. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  8593. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  8594. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  8595. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  8596. >>> torch.std(a, dim=1, keepdim=True)
  8597. tensor([[1.0311],
  8598. [0.7477],
  8599. [1.2204],
  8600. [0.9087]])
  8601. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  8602. """.format(
  8603. **multi_dim_common
  8604. ),
  8605. )
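# Editor's sketch (hypothetical helper, not part of the original file;
# assumes a PyTorch >= 2.0 runtime for the `correction` keyword): relates
# correction=0 and correction=1 to the formula in the std docstring above.
def _sketch_std_correction():
    import torch

    x = torch.tensor([1.0, 2.0, 3.0, 4.0])
    n = x.numel()
    biased = torch.std(x, correction=0)  # divides by N
    unbiased = torch.std(x, correction=1)  # divides by N - 1 (the default)
    assert torch.isclose(biased**2, ((x - x.mean()) ** 2).mean())
    assert torch.isclose(unbiased**2 * (n - 1), biased**2 * n)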
  8606. add_docstr(
  8607. torch.std_mean,
  8608. r"""
  8609. std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
  8610. Calculates the standard deviation and mean over the dimensions specified by
  8611. :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
  8612. ``None`` to reduce over all dimensions.
  8613. The standard deviation (:math:`\sigma`) is calculated as
  8614. .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
  8615. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  8616. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  8617. the :attr:`correction`.
  8618. """
  8619. + r"""
  8620. {keepdim_details}
  8621. Args:
  8622. {input}
  8623. {opt_dim}
  8624. Keyword args:
  8625. correction (int): difference between the sample size and sample degrees of freedom.
  8626. Defaults to `Bessel's correction`_, ``correction=1``.
  8627. .. versionchanged:: 2.0
  8628. Previously this argument was called ``unbiased`` and was a boolean
  8629. with ``True`` corresponding to ``correction=1`` and ``False`` being
  8630. ``correction=0``.
  8631. {keepdim}
  8632. {out}
  8633. Returns:
  8634. A tuple (std, mean) containing the standard deviation and mean.
Example::
  8636. >>> a = torch.tensor(
  8637. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  8638. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  8639. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  8640. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  8641. >>> torch.std_mean(a, dim=0, keepdim=True)
  8642. (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
  8643. tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
  8644. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  8645. """.format(
  8646. **multi_dim_common
  8647. ),
  8648. )
  8649. add_docstr(
  8650. torch.sub,
  8651. r"""
  8652. sub(input, other, *, alpha=1, out=None) -> Tensor
  8653. Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
  8654. .. math::
  8655. \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
  8656. """
  8657. + r"""
  8658. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
  8659. :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
  8660. Args:
  8661. {input}
  8662. other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
  8663. Keyword args:
  8664. alpha (Number): the multiplier for :attr:`other`.
  8665. {out}
  8666. Example::
  8667. >>> a = torch.tensor((1, 2))
  8668. >>> b = torch.tensor((0, 1))
  8669. >>> torch.sub(a, b, alpha=2)
  8670. tensor([1, 0])
  8671. """.format(
  8672. **common_args
  8673. ),
  8674. )
  8675. add_docstr(
  8676. torch.subtract,
  8677. r"""
  8678. subtract(input, other, *, alpha=1, out=None) -> Tensor
  8679. Alias for :func:`torch.sub`.
  8680. """,
  8681. )
  8682. add_docstr(
  8683. torch.sum,
  8684. r"""
  8685. sum(input, *, dtype=None) -> Tensor
  8686. Returns the sum of all elements in the :attr:`input` tensor.
  8687. Args:
  8688. {input}
  8689. Keyword args:
  8690. {dtype}
  8691. Example::
  8692. >>> a = torch.randn(1, 3)
  8693. >>> a
  8694. tensor([[ 0.1133, -0.9567, 0.2958]])
  8695. >>> torch.sum(a)
  8696. tensor(-0.5475)
  8697. .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
  8698. :noindex:
  8699. Returns the sum of each row of the :attr:`input` tensor in the given
  8700. dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
  8701. reduce over all of them.
  8702. {keepdim_details}
  8703. Args:
  8704. {input}
  8705. {opt_dim}
  8706. {keepdim}
  8707. Keyword args:
  8708. {dtype}
  8709. Example::
  8710. >>> a = torch.randn(4, 4)
  8711. >>> a
  8712. tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
  8713. [-0.2993, 0.9138, 0.9337, -1.6864],
  8714. [ 0.1132, 0.7892, -0.1003, 0.5688],
  8715. [ 0.3637, -0.9906, -0.4752, -1.5197]])
  8716. >>> torch.sum(a, 1)
  8717. tensor([-0.4598, -0.1381, 1.3708, -2.6217])
  8718. >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
  8719. >>> torch.sum(b, (2, 1))
  8720. tensor([ 435., 1335., 2235., 3135.])
  8721. """.format(
  8722. **multi_dim_common
  8723. ),
  8724. )
  8725. add_docstr(
  8726. torch.nansum,
  8727. r"""
  8728. nansum(input, *, dtype=None) -> Tensor
Returns the sum of all elements, treating Not a Number (NaN) values as zero.
  8730. Args:
  8731. {input}
  8732. Keyword args:
  8733. {dtype}
  8734. Example::
  8735. >>> a = torch.tensor([1., 2., float('nan'), 4.])
  8736. >>> torch.nansum(a)
  8737. tensor(7.)
  8738. .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
  8739. :noindex:
  8740. Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating Not a Number (NaN) values as zero.
  8742. If :attr:`dim` is a list of dimensions, reduce over all of them.
  8743. {keepdim_details}
  8744. Args:
  8745. {input}
  8746. {opt_dim}
  8747. {keepdim}
  8748. Keyword args:
  8749. {dtype}
  8750. Example::
  8751. >>> torch.nansum(torch.tensor([1., float("nan")]))
  8752. 1.0
  8753. >>> a = torch.tensor([[1, 2], [3., float("nan")]])
  8754. >>> torch.nansum(a)
  8755. tensor(6.)
  8756. >>> torch.nansum(a, dim=0)
  8757. tensor([4., 2.])
  8758. >>> torch.nansum(a, dim=1)
  8759. tensor([3., 3.])
  8760. """.format(
  8761. **multi_dim_common
  8762. ),
  8763. )
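# Editor's sketch (hypothetical helper, not part of the original file):
# contrasts torch.sum, which propagates NaN, with torch.nansum, which
# treats NaN as zero as described above.
def _sketch_nansum_vs_sum():
    import torch

    a = torch.tensor([1.0, float("nan"), 2.0])
    assert torch.isnan(torch.sum(a))
    assert torch.nansum(a) == 3.0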
  8764. add_docstr(
  8765. torch.svd,
  8766. r"""
  8767. svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
  8768. Computes the singular value decomposition of either a matrix or batch of
  8769. matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
  8771. where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
  8772. and the conjugate transpose of `V` for complex inputs.
  8773. If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
  8774. batched with the same batch dimensions as :attr:`input`.
  8775. If :attr:`some` is `True` (default), the method returns the reduced singular
  8776. value decomposition. In this case, if the last two dimensions of :attr:`input` are
  8777. `m` and `n`, then the returned `U` and `V` matrices will contain only
  8778. `min(n, m)` orthonormal columns.
  8779. If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
  8780. zero-filled matrices of shape `(m, m)` and `(n, n)`
respectively, on the same device as :attr:`input`. The argument :attr:`some`
  8782. has no effect when :attr:`compute_uv` is `False`.
  8783. Supports :attr:`input` of float, double, cfloat and cdouble data types.
  8784. The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
  8785. always be real-valued, even if :attr:`input` is complex.
  8786. .. warning::
  8787. :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
  8788. and will be removed in a future PyTorch release.
  8789. ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
  8790. .. code:: python
  8791. U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
  8792. V = Vh.mH
  8793. ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
  8794. .. code:: python
  8795. S = torch.linalg.svdvals(A)
  8796. .. note:: Differences with :func:`torch.linalg.svd`:
  8797. * :attr:`some` is the opposite of
  8798. :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
  8799. default value for both is `True`, so the default behavior is
  8800. effectively the opposite.
  8801. * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
  8802. `Vh`, that is, :math:`V^{\text{H}}`.
  8803. * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
  8804. tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
  8805. empty tensors.
  8806. .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
  8807. then the singular values of each matrix in the batch are returned in descending order.
  8808. .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
  8809. .. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
  8810. and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
  8811. can be arbitrary bases of the corresponding subspaces.
  8812. .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
  8813. (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
  8814. on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
  8815. and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
  8816. .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
  8817. be represented as a column-major matrix (i.e. Fortran-contiguous).
.. warning:: The gradients with respect to `U` and `V` will only be finite when the input has
neither zero nor repeated singular values.
  8820. .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
`U` and `V` will be numerically unstable, as they depend on
  8822. :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
  8823. has small singular values, as these gradients also depend on `S^{-1}`.
  8824. .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
  8825. as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
  8826. The same happens when :attr:`input` has repeated singular values, where one may multiply
  8827. the columns of the spanning subspace in `U` and `V` by a rotation matrix
  8828. and `the resulting vectors will span the same subspace`_.
  8829. Different platforms, like NumPy, or inputs on different device types,
  8830. may produce different `U` and `V` tensors.
  8831. Args:
  8832. input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
  8833. batch dimensions consisting of `(m, n)` matrices.
  8834. some (bool, optional): controls whether to compute the reduced or full decomposition, and
  8835. consequently, the shape of returned `U` and `V`. Default: `True`.
  8836. compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
  8837. Keyword args:
  8838. out (tuple, optional): the output tuple of tensors
  8839. Example::
  8840. >>> a = torch.randn(5, 3)
  8841. >>> a
  8842. tensor([[ 0.2364, -0.7752, 0.6372],
  8843. [ 1.7201, 0.7394, -0.0504],
  8844. [-0.3371, -1.0584, 0.5296],
  8845. [ 0.3550, -0.4022, 1.5569],
  8846. [ 0.2445, -0.0158, 1.1414]])
  8847. >>> u, s, v = torch.svd(a)
  8848. >>> u
  8849. tensor([[ 0.4027, 0.0287, 0.5434],
  8850. [-0.1946, 0.8833, 0.3679],
  8851. [ 0.4296, -0.2890, 0.5261],
  8852. [ 0.6604, 0.2717, -0.2618],
  8853. [ 0.4234, 0.2481, -0.4733]])
  8854. >>> s
  8855. tensor([2.3289, 2.0315, 0.7806])
  8856. >>> v
  8857. tensor([[-0.0199, 0.8766, 0.4809],
  8858. [-0.5080, 0.4054, -0.7600],
  8859. [ 0.8611, 0.2594, -0.4373]])
  8860. >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
  8861. tensor(8.6531e-07)
  8862. >>> a_big = torch.randn(7, 5, 3)
  8863. >>> u, s, v = torch.svd(a_big)
  8864. >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
  8865. tensor(2.6503e-06)
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
  8868. """,
  8869. )
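# Editor's sketch (hypothetical helper, not part of the original file):
# checks the deprecated torch.svd against the torch.linalg.svd replacement
# suggested in the warning above. Singular vectors are unique only up to
# sign, so the reconstructions are compared rather than the factors.
def _sketch_svd_migration():
    import torch

    A = torch.randn(5, 3)
    U, S, V = torch.svd(A, some=True)  # deprecated API
    U2, S2, Vh = torch.linalg.svd(A, full_matrices=False)
    assert torch.allclose(S, S2, atol=1e-6)
    assert torch.allclose(
        U @ torch.diag(S) @ V.mH, U2 @ torch.diag(S2) @ Vh, atol=1e-5
    )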
  8870. add_docstr(
  8871. torch.t,
  8872. r"""
  8873. t(input) -> Tensor
Expects :attr:`input` to be a tensor with at most 2 dimensions, and transposes
dimensions 0 and 1.
0-D and 1-D tensors are returned as is. When input is a 2-D tensor, this
is equivalent to ``transpose(input, 0, 1)``.
  8878. Args:
  8879. {input}
  8880. Example::
  8881. >>> x = torch.randn(())
  8882. >>> x
  8883. tensor(0.1995)
  8884. >>> torch.t(x)
  8885. tensor(0.1995)
  8886. >>> x = torch.randn(3)
  8887. >>> x
  8888. tensor([ 2.4320, -0.4608, 0.7702])
  8889. >>> torch.t(x)
  8890. tensor([ 2.4320, -0.4608, 0.7702])
  8891. >>> x = torch.randn(2, 3)
  8892. >>> x
  8893. tensor([[ 0.4875, 0.9158, -0.5872],
  8894. [ 0.3938, -0.6929, 0.6932]])
  8895. >>> torch.t(x)
  8896. tensor([[ 0.4875, 0.3938],
  8897. [ 0.9158, -0.6929],
  8898. [-0.5872, 0.6932]])
  8899. See also :func:`torch.transpose`.
  8900. """.format(
  8901. **common_args
  8902. ),
  8903. )
  8904. add_docstr(
  8905. torch.flip,
  8906. r"""
  8907. flip(input, dims) -> Tensor
Reverses the order of an n-D tensor along the given axes in :attr:`dims`.
  8909. .. note::
  8910. `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
  8911. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  8912. `torch.flip` is expected to be slower than `np.flip`.
  8913. Args:
  8914. {input}
dims (a list or tuple): axes to flip on
  8916. Example::
  8917. >>> x = torch.arange(8).view(2, 2, 2)
  8918. >>> x
  8919. tensor([[[ 0, 1],
  8920. [ 2, 3]],
  8921. [[ 4, 5],
  8922. [ 6, 7]]])
  8923. >>> torch.flip(x, [0, 1])
  8924. tensor([[[ 6, 7],
  8925. [ 4, 5]],
  8926. [[ 2, 3],
  8927. [ 0, 1]]])
  8928. """.format(
  8929. **common_args
  8930. ),
  8931. )
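# Editor's sketch (hypothetical helper, not part of the original file):
# confirms the note above that torch.flip copies the data rather than
# returning a view, unlike np.flip.
def _sketch_flip_copies_data():
    import torch

    x = torch.arange(4)
    y = torch.flip(x, [0])
    y[0] = 99
    assert x[3] == 3  # the original is untouched: flip returned a copy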
  8932. add_docstr(
  8933. torch.fliplr,
  8934. r"""
  8935. fliplr(input) -> Tensor
  8936. Flip tensor in the left/right direction, returning a new tensor.
  8937. Flip the entries in each row in the left/right direction.
  8938. Columns are preserved, but appear in a different order than before.
  8939. Note:
  8940. Requires the tensor to be at least 2-D.
  8941. .. note::
  8942. `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
  8943. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  8944. `torch.fliplr` is expected to be slower than `np.fliplr`.
  8945. Args:
  8946. input (Tensor): Must be at least 2-dimensional.
  8947. Example::
  8948. >>> x = torch.arange(4).view(2, 2)
  8949. >>> x
  8950. tensor([[0, 1],
  8951. [2, 3]])
  8952. >>> torch.fliplr(x)
  8953. tensor([[1, 0],
  8954. [3, 2]])
  8955. """.format(
  8956. **common_args
  8957. ),
  8958. )
  8959. add_docstr(
  8960. torch.flipud,
  8961. r"""
  8962. flipud(input) -> Tensor
  8963. Flip tensor in the up/down direction, returning a new tensor.
  8964. Flip the entries in each column in the up/down direction.
  8965. Rows are preserved, but appear in a different order than before.
  8966. Note:
  8967. Requires the tensor to be at least 1-D.
  8968. .. note::
  8969. `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
  8970. which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
  8971. `torch.flipud` is expected to be slower than `np.flipud`.
  8972. Args:
  8973. input (Tensor): Must be at least 1-dimensional.
  8974. Example::
  8975. >>> x = torch.arange(4).view(2, 2)
  8976. >>> x
  8977. tensor([[0, 1],
  8978. [2, 3]])
  8979. >>> torch.flipud(x)
  8980. tensor([[2, 3],
  8981. [0, 1]])
  8982. """.format(
  8983. **common_args
  8984. ),
  8985. )
  8986. add_docstr(
  8987. torch.roll,
  8988. r"""
  8989. roll(input, shifts, dims=None) -> Tensor
  8990. Roll the tensor :attr:`input` along the given dimension(s). Elements that are
  8991. shifted beyond the last position are re-introduced at the first position. If
  8992. :attr:`dims` is `None`, the tensor will be flattened before rolling and then
  8993. restored to the original shape.
  8994. Args:
  8995. {input}
shifts (int or tuple of ints): The number of places by which the elements
of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
the same size, and each dimension will be rolled by the corresponding
value.
dims (int or tuple of ints): Axes along which to roll.
  9001. Example::
  9002. >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
  9003. >>> x
  9004. tensor([[1, 2],
  9005. [3, 4],
  9006. [5, 6],
  9007. [7, 8]])
  9008. >>> torch.roll(x, 1)
  9009. tensor([[8, 1],
  9010. [2, 3],
  9011. [4, 5],
  9012. [6, 7]])
  9013. >>> torch.roll(x, 1, 0)
  9014. tensor([[7, 8],
  9015. [1, 2],
  9016. [3, 4],
  9017. [5, 6]])
  9018. >>> torch.roll(x, -1, 0)
  9019. tensor([[3, 4],
  9020. [5, 6],
  9021. [7, 8],
  9022. [1, 2]])
  9023. >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
  9024. tensor([[6, 5],
  9025. [8, 7],
  9026. [2, 1],
  9027. [4, 3]])
  9028. """.format(
  9029. **common_args
  9030. ),
  9031. )
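# Editor's sketch (hypothetical helper, not part of the original file):
# verifies the statement above that with dims=None the tensor is
# flattened, rolled, and then restored to its original shape.
def _sketch_roll_flatten_equivalence():
    import torch

    x = torch.arange(8).view(4, 2)
    assert torch.equal(torch.roll(x, 1), torch.roll(x.flatten(), 1).view(4, 2))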
  9032. add_docstr(
  9033. torch.rot90,
  9034. r"""
  9035. rot90(input, k=1, dims=[0,1]) -> Tensor
Rotates an n-D tensor by 90 degrees in the plane specified by :attr:`dims`.
The rotation direction is from the first towards the second axis if ``k > 0``, and from the second towards the first for ``k < 0``.
  9038. Args:
  9039. {input}
k (int): number of times to rotate. Default value is 1.
dims (a list or tuple): axes to rotate. Default value is [0, 1].
  9042. Example::
  9043. >>> x = torch.arange(4).view(2, 2)
  9044. >>> x
  9045. tensor([[0, 1],
  9046. [2, 3]])
  9047. >>> torch.rot90(x, 1, [0, 1])
  9048. tensor([[1, 3],
  9049. [0, 2]])
  9050. >>> x = torch.arange(8).view(2, 2, 2)
  9051. >>> x
  9052. tensor([[[0, 1],
  9053. [2, 3]],
  9054. [[4, 5],
  9055. [6, 7]]])
  9056. >>> torch.rot90(x, 1, [1, 2])
  9057. tensor([[[1, 3],
  9058. [0, 2]],
  9059. [[5, 7],
  9060. [4, 6]]])
  9061. """.format(
  9062. **common_args
  9063. ),
  9064. )
  9065. add_docstr(
  9066. torch.take,
  9067. r"""
  9068. take(input, index) -> Tensor
  9069. Returns a new tensor with the elements of :attr:`input` at the given indices.
  9070. The input tensor is treated as if it were viewed as a 1-D tensor. The result
  9071. takes the same shape as the indices.
  9072. Args:
  9073. {input}
  9074. index (LongTensor): the indices into tensor
  9075. Example::
  9076. >>> src = torch.tensor([[4, 3, 5],
  9077. ... [6, 7, 8]])
  9078. >>> torch.take(src, torch.tensor([0, 2, 5]))
  9079. tensor([ 4, 5, 8])
  9080. """.format(
  9081. **common_args
  9082. ),
  9083. )
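# Editor's sketch (hypothetical helper, not part of the original file):
# torch.take treats the input as 1-D, so it matches plain indexing into a
# flattened view, as stated above.
def _sketch_take_is_flat_indexing():
    import torch

    src = torch.tensor([[4, 3, 5], [6, 7, 8]])
    idx = torch.tensor([0, 2, 5])
    assert torch.equal(torch.take(src, idx), src.flatten()[idx])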
  9084. add_docstr(
  9085. torch.take_along_dim,
  9086. r"""
  9087. take_along_dim(input, indices, dim=None, *, out=None) -> Tensor
  9088. Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
If :attr:`dim` is None, the input array is treated as if it had been flattened to 1-D.
  9090. Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
  9091. are designed to work with this function. See the examples below.
  9092. .. note::
  9093. This function is similar to NumPy's `take_along_axis`.
  9094. See also :func:`torch.gather`.
  9095. Args:
  9096. {input}
  9097. indices (tensor): the indices into :attr:`input`. Must have long dtype.
  9098. dim (int, optional): dimension to select along.
  9099. Keyword args:
  9100. {out}
  9101. Example::
  9102. >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
  9103. >>> max_idx = torch.argmax(t)
  9104. >>> torch.take_along_dim(t, max_idx)
  9105. tensor([60])
  9106. >>> sorted_idx = torch.argsort(t, dim=1)
  9107. >>> torch.take_along_dim(t, sorted_idx, dim=1)
  9108. tensor([[10, 20, 30],
  9109. [40, 50, 60]])
  9110. """.format(
  9111. **common_args
  9112. ),
  9113. )
  9114. add_docstr(
  9115. torch.tan,
  9116. r"""
  9117. tan(input, *, out=None) -> Tensor
  9118. Returns a new tensor with the tangent of the elements of :attr:`input`.
  9119. .. math::
  9120. \text{out}_{i} = \tan(\text{input}_{i})
  9121. """
  9122. + r"""
  9123. Args:
  9124. {input}
  9125. Keyword args:
  9126. {out}
  9127. Example::
  9128. >>> a = torch.randn(4)
  9129. >>> a
  9130. tensor([-1.2027, -1.7687, 0.4412, -1.3856])
  9131. >>> torch.tan(a)
  9132. tensor([-2.5930, 4.9859, 0.4722, -5.3366])
  9133. """.format(
  9134. **common_args
  9135. ),
  9136. )
  9137. add_docstr(
  9138. torch.tanh,
  9139. r"""
  9140. tanh(input, *, out=None) -> Tensor
  9141. Returns a new tensor with the hyperbolic tangent of the elements
  9142. of :attr:`input`.
  9143. .. math::
  9144. \text{out}_{i} = \tanh(\text{input}_{i})
  9145. """
  9146. + r"""
  9147. Args:
  9148. {input}
  9149. Keyword args:
  9150. {out}
  9151. Example::
  9152. >>> a = torch.randn(4)
  9153. >>> a
  9154. tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
  9155. >>> torch.tanh(a)
  9156. tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
  9157. """.format(
  9158. **common_args
  9159. ),
  9160. )
  9161. add_docstr(
  9162. # torch.softmax doc str. Point this to torch.nn.functional.softmax
  9163. torch.softmax,
  9164. r"""
  9165. softmax(input, dim, *, dtype=None) -> Tensor
  9166. Alias for :func:`torch.nn.functional.softmax`.
  9167. """,
  9168. )
  9169. add_docstr(
  9170. torch.topk,
  9171. r"""
  9172. topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
  9173. Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
  9174. a given dimension.
  9175. If :attr:`dim` is not given, the last dimension of the `input` is chosen.
  9176. If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
  9177. A namedtuple of `(values, indices)` is returned with the `values` and
  9178. `indices` of the largest `k` elements of each row of the `input` tensor in the
  9179. given dimension `dim`.
If the boolean option :attr:`sorted` is ``True``, the returned
`k` elements will themselves be sorted.
  9182. Args:
  9183. {input}
  9184. k (int): the k in "top-k"
  9185. dim (int, optional): the dimension to sort along
  9186. largest (bool, optional): controls whether to return largest or
  9187. smallest elements
  9188. sorted (bool, optional): controls whether to return the elements
  9189. in sorted order
  9190. Keyword args:
  9191. out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
  9192. optionally given to be used as output buffers
  9193. Example::
  9194. >>> x = torch.arange(1., 6.)
  9195. >>> x
  9196. tensor([ 1., 2., 3., 4., 5.])
  9197. >>> torch.topk(x, 3)
  9198. torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
  9199. """.format(
  9200. **common_args
  9201. ),
  9202. )
  9203. add_docstr(
  9204. torch.trace,
  9205. r"""
  9206. trace(input) -> Tensor
  9207. Returns the sum of the elements of the diagonal of the input 2-D matrix.
  9208. Example::
  9209. >>> x = torch.arange(1., 10.).view(3, 3)
  9210. >>> x
  9211. tensor([[ 1., 2., 3.],
  9212. [ 4., 5., 6.],
  9213. [ 7., 8., 9.]])
  9214. >>> torch.trace(x)
  9215. tensor(15.)
  9216. """,
  9217. )
  9218. add_docstr(
  9219. torch.transpose,
  9220. r"""
  9221. transpose(input, dim0, dim1) -> Tensor
  9222. Returns a tensor that is a transposed version of :attr:`input`.
  9223. The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
  9224. If :attr:`input` is a strided tensor then the resulting :attr:`out`
  9225. tensor shares its underlying storage with the :attr:`input` tensor, so
  9226. changing the content of one would change the content of the other.
  9227. If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
  9228. resulting :attr:`out` tensor *does not* share the underlying storage
  9229. with the :attr:`input` tensor.
  9230. If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
  9231. layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
  9232. :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
  9233. both be sparse dimensions. The batch dimensions of a sparse tensor are the
  9234. dimensions preceding the sparse dimensions.
  9235. .. note::
  9236. Transpositions which interchange the sparse dimensions of a `SparseCSR`
  9237. or `SparseCSC` layout tensor will result in the layout changing between
the two options. Transposition of the sparse dimensions of a `SparseBSR`
or `SparseBSC` layout tensor will likewise generate a result with the
opposite layout.
  9241. Args:
  9242. {input}
  9243. dim0 (int): the first dimension to be transposed
  9244. dim1 (int): the second dimension to be transposed
  9245. Example::
  9246. >>> x = torch.randn(2, 3)
  9247. >>> x
  9248. tensor([[ 1.0028, -0.9893, 0.5809],
  9249. [-0.1669, 0.7299, 0.4942]])
  9250. >>> torch.transpose(x, 0, 1)
  9251. tensor([[ 1.0028, -0.1669],
  9252. [-0.9893, 0.7299],
  9253. [ 0.5809, 0.4942]])
  9254. See also :func:`torch.t`.
  9255. """.format(
  9256. **common_args
  9257. ),
  9258. )
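# Editor's sketch (hypothetical helper, not part of the original file):
# for strided tensors torch.transpose returns a view, so writes through the
# result are visible in the input, per the note above.
def _sketch_transpose_shares_storage():
    import torch

    x = torch.zeros(2, 3)
    y = torch.transpose(x, 0, 1)
    y[0, 1] = 7.0  # position (0, 1) of the view is (1, 0) of the input
    assert x[1, 0] == 7.0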
  9259. add_docstr(
  9260. torch.triangular_solve,
  9261. r"""
  9262. triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
  9263. Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
  9264. and multiple right-hand sides :math:`b`.
  9265. In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
  9266. (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then batched
outputs `X` are returned.
If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
the result may contain `NaN` values.
  9273. Supports input of float, double, cfloat and cdouble data types.
  9274. .. warning::
  9275. :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
  9276. and will be removed in a future PyTorch release.
  9277. :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
  9278. copy of one of the inputs.
  9279. ``X = torch.triangular_solve(B, A).solution`` should be replaced with
  9280. .. code:: python
  9281. X = torch.linalg.solve_triangular(A, B)
  9282. Args:
  9283. b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
:math:`*` is zero or more batch dimensions
  9285. A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
  9286. where :math:`*` is zero or more batch dimensions
  9287. upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
  9288. transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
  9289. and `op(A) = A` if it is ``False``. Default: ``False``.
  9290. unitriangular (bool, optional): whether :math:`A` is unit triangular.
  9291. If True, the diagonal elements of :math:`A` are assumed to be
  9292. 1 and not referenced from :math:`A`. Default: ``False``.
  9293. Keyword args:
  9294. out ((Tensor, Tensor), optional): tuple of two tensors to write
  9295. the output to. Ignored if `None`. Default: `None`.
  9296. Returns:
  9297. A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
  9298. is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
  9299. (or whatever variant of the system of equations, depending on the keyword arguments.)
  9300. Examples::
  9301. >>> A = torch.randn(2, 2).triu()
  9302. >>> A
  9303. tensor([[ 1.1527, -1.0753],
  9304. [ 0.0000, 0.7986]])
  9305. >>> b = torch.randn(2, 3)
  9306. >>> b
  9307. tensor([[-0.0210, 2.3513, -1.5492],
  9308. [ 1.5429, 0.7403, -1.0243]])
  9309. >>> torch.triangular_solve(b, A)
  9310. torch.return_types.triangular_solve(
  9311. solution=tensor([[ 1.7841, 2.9046, -2.5405],
  9312. [ 1.9320, 0.9270, -1.2826]]),
  9313. cloned_coefficient=tensor([[ 1.1527, -1.0753],
  9314. [ 0.0000, 0.7986]]))
  9315. """,
  9316. )
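# Editor's sketch (hypothetical helper, not part of the original file):
# exercises the torch.linalg.solve_triangular replacement suggested in the
# deprecation warning above, using a fixed, well-conditioned upper
# triangular matrix.
def _sketch_solve_triangular_migration():
    import torch

    A = torch.tensor([[2.0, 1.0, 0.5], [0.0, 3.0, 1.0], [0.0, 0.0, 4.0]])
    b = torch.randn(3, 2)
    X = torch.linalg.solve_triangular(A, b, upper=True)
    assert torch.allclose(A @ X, b, atol=1e-5)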
  9317. add_docstr(
  9318. torch.tril,
  9319. r"""
  9320. tril(input, diagonal=0, *, out=None) -> Tensor
Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`. The other elements of the result tensor :attr:`out` are set to 0.
  9323. The lower triangular part of the matrix is defined as the elements on and
  9324. below the diagonal.
  9325. The argument :attr:`diagonal` controls which diagonal to consider. If
  9326. :attr:`diagonal` = 0, all elements on and below the main diagonal are
  9327. retained. A positive value includes just as many diagonals above the main
  9328. diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
  9330. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
  9331. :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9332. """
  9333. + r"""
  9334. Args:
  9335. {input}
  9336. diagonal (int, optional): the diagonal to consider
  9337. Keyword args:
  9338. {out}
  9339. Example::
  9340. >>> a = torch.randn(3, 3)
  9341. >>> a
  9342. tensor([[-1.0813, -0.8619, 0.7105],
  9343. [ 0.0935, 0.1380, 2.2112],
  9344. [-0.3409, -0.9828, 0.0289]])
  9345. >>> torch.tril(a)
  9346. tensor([[-1.0813, 0.0000, 0.0000],
  9347. [ 0.0935, 0.1380, 0.0000],
  9348. [-0.3409, -0.9828, 0.0289]])
  9349. >>> b = torch.randn(4, 6)
  9350. >>> b
  9351. tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
  9352. [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
  9353. [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
  9354. [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
  9355. >>> torch.tril(b, diagonal=1)
  9356. tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
  9357. [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
  9358. [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
  9359. [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
  9360. >>> torch.tril(b, diagonal=-1)
  9361. tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
  9362. [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
  9363. [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
  9364. [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
  9365. """.format(
  9366. **common_args
  9367. ),
  9368. )
  9369. # docstr is split in two parts to avoid format mis-captureing :math: braces '{}'
  9370. # as common args.
  9371. add_docstr(
  9372. torch.tril_indices,
  9373. r"""
  9374. tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
  9375. Returns the indices of the lower triangular part of a :attr:`row`-by-
  9376. :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
  9377. coordinates of all indices and the second row contains column coordinates.
  9378. Indices are ordered based on rows and then columns.
  9379. The lower triangular part of the matrix is defined as the elements on and
  9380. below the diagonal.
  9381. The argument :attr:`offset` controls which diagonal to consider. If
  9382. :attr:`offset` = 0, all elements on and below the main diagonal are
  9383. retained. A positive value includes just as many diagonals above the main
  9384. diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
  9386. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
  9387. where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9388. .. note::
  9389. When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
  9390. prevent overflow during calculation.
  9391. """
  9392. + r"""
  9393. Args:
  9394. row (``int``): number of rows in the 2-D matrix.
  9395. col (``int``): number of columns in the 2-D matrix.
  9396. offset (``int``): diagonal offset from the main diagonal.
  9397. Default: if not provided, 0.
  9398. Keyword args:
  9399. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9400. Default: if ``None``, ``torch.long``.
  9401. {device}
layout (:class:`torch.layout`, optional): currently only ``torch.strided`` is supported.
  9403. Example::
  9404. >>> a = torch.tril_indices(3, 3)
  9405. >>> a
  9406. tensor([[0, 1, 1, 2, 2, 2],
  9407. [0, 0, 1, 0, 1, 2]])
  9408. >>> a = torch.tril_indices(4, 3, -1)
  9409. >>> a
  9410. tensor([[1, 2, 2, 3, 3, 3],
  9411. [0, 0, 1, 0, 1, 2]])
  9412. >>> a = torch.tril_indices(4, 3, 1)
  9413. >>> a
  9414. tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
  9415. [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
  9416. """.format(
  9417. **factory_common_args
  9418. ),
  9419. )
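# Editor's sketch (hypothetical helper, not part of the original file):
# the indices returned by torch.tril_indices are exactly the non-zero
# positions of a lower-triangular mask, in the row-major order stated above.
def _sketch_tril_indices_match_tril():
    import torch

    idx = torch.tril_indices(3, 3)
    mask = torch.tril(torch.ones(3, 3))
    assert torch.equal(idx, mask.nonzero().t())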
  9420. add_docstr(
  9421. torch.triu,
  9422. r"""
  9423. triu(input, diagonal=0, *, out=None) -> Tensor
Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`. The other elements of the result tensor :attr:`out` are set to 0.
  9426. The upper triangular part of the matrix is defined as the elements on and
  9427. above the diagonal.
  9428. The argument :attr:`diagonal` controls which diagonal to consider. If
  9429. :attr:`diagonal` = 0, all elements on and above the main diagonal are
  9430. retained. A positive value excludes just as many diagonals above the main
  9431. diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
  9433. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
  9434. :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9435. """
  9436. + r"""
  9437. Args:
  9438. {input}
  9439. diagonal (int, optional): the diagonal to consider
  9440. Keyword args:
  9441. {out}
  9442. Example::
  9443. >>> a = torch.randn(3, 3)
  9444. >>> a
  9445. tensor([[ 0.2309, 0.5207, 2.0049],
  9446. [ 0.2072, -1.0680, 0.6602],
  9447. [ 0.3480, -0.5211, -0.4573]])
  9448. >>> torch.triu(a)
  9449. tensor([[ 0.2309, 0.5207, 2.0049],
  9450. [ 0.0000, -1.0680, 0.6602],
  9451. [ 0.0000, 0.0000, -0.4573]])
  9452. >>> torch.triu(a, diagonal=1)
  9453. tensor([[ 0.0000, 0.5207, 2.0049],
  9454. [ 0.0000, 0.0000, 0.6602],
  9455. [ 0.0000, 0.0000, 0.0000]])
  9456. >>> torch.triu(a, diagonal=-1)
  9457. tensor([[ 0.2309, 0.5207, 2.0049],
  9458. [ 0.2072, -1.0680, 0.6602],
  9459. [ 0.0000, -0.5211, -0.4573]])
  9460. >>> b = torch.randn(4, 6)
  9461. >>> b
  9462. tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
  9463. [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
  9464. [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
  9465. [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
  9466. >>> torch.triu(b, diagonal=1)
  9467. tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
  9468. [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
  9469. [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
  9470. [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
  9471. >>> torch.triu(b, diagonal=-1)
  9472. tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
  9473. [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
  9474. [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
  9475. [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
  9476. """.format(
  9477. **common_args
  9478. ),
  9479. )
  9480. # docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
  9481. # as common args.
  9482. add_docstr(
  9483. torch.triu_indices,
  9484. r"""
  9485. triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
  9486. Returns the indices of the upper triangular part of a :attr:`row` by
  9487. :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
  9488. coordinates of all indices and the second row contains column coordinates.
  9489. Indices are ordered based on rows and then columns.
  9490. The upper triangular part of the matrix is defined as the elements on and
  9491. above the diagonal.
  9492. The argument :attr:`offset` controls which diagonal to consider. If
  9493. :attr:`offset` = 0, all elements on and above the main diagonal are
  9494. retained. A positive value excludes just as many diagonals above the main
  9495. diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
  9497. :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
  9498. where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
  9499. .. note::
  9500. When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
  9501. prevent overflow during calculation.
  9502. """
  9503. + r"""
  9504. Args:
  9505. row (``int``): number of rows in the 2-D matrix.
  9506. col (``int``): number of columns in the 2-D matrix.
  9507. offset (``int``): diagonal offset from the main diagonal.
  9508. Default: if not provided, 0.
  9509. Keyword args:
  9510. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
  9511. Default: if ``None``, ``torch.long``.
  9512. {device}
layout (:class:`torch.layout`, optional): currently only ``torch.strided`` is supported.
  9514. Example::
  9515. >>> a = torch.triu_indices(3, 3)
  9516. >>> a
  9517. tensor([[0, 0, 0, 1, 1, 2],
  9518. [0, 1, 2, 1, 2, 2]])
  9519. >>> a = torch.triu_indices(4, 3, -1)
  9520. >>> a
  9521. tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
  9522. [0, 1, 2, 0, 1, 2, 1, 2, 2]])
  9523. >>> a = torch.triu_indices(4, 3, 1)
  9524. >>> a
  9525. tensor([[0, 0, 1],
  9526. [1, 2, 2]])
  9527. """.format(
  9528. **factory_common_args
  9529. ),
  9530. )
  9531. add_docstr(
  9532. torch.true_divide,
  9533. r"""
  9534. true_divide(dividend, divisor, *, out) -> Tensor
  9535. Alias for :func:`torch.div` with ``rounding_mode=None``.
  9536. """,
  9537. )
  9538. add_docstr(
  9539. torch.trunc,
  9540. r"""
  9541. trunc(input, *, out=None) -> Tensor
  9542. Returns a new tensor with the truncated integer values of
  9543. the elements of :attr:`input`.
  9544. For integer inputs, follows the array-api convention of returning a
  9545. copy of the input tensor.
  9546. Args:
  9547. {input}
  9548. Keyword args:
  9549. {out}
  9550. Example::
  9551. >>> a = torch.randn(4)
  9552. >>> a
  9553. tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
  9554. >>> torch.trunc(a)
  9555. tensor([ 3., 0., -0., -0.])
  9556. """.format(
  9557. **common_args
  9558. ),
  9559. )
  9560. add_docstr(
  9561. torch.fake_quantize_per_tensor_affine,
  9562. r"""
  9563. fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
  9564. Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
  9565. :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
  9566. .. math::
  9567. \text{output} = (
  9568. min(
  9569. \text{quant\_max},
  9570. max(
  9571. \text{quant\_min},
  9572. \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
  9573. )
  9574. ) - \text{zero\_point}
  9575. ) \times \text{scale}
  9576. Args:
  9577. input (Tensor): the input value(s), ``torch.float32`` tensor
  9578. scale (double scalar or ``float32`` Tensor): quantization scale
  9579. zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
  9580. quant_min (int64): lower bound of the quantized domain
  9581. quant_max (int64): upper bound of the quantized domain
  9582. Returns:
  9583. Tensor: A newly fake_quantized ``torch.float32`` tensor
  9584. Example::
  9585. >>> x = torch.randn(4)
  9586. >>> x
  9587. tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
  9588. >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
  9589. tensor([0.1000, 1.0000, 0.4000, 0.0000])
  9590. >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
  9591. tensor([0.1000, 1.0000, 0.4000, 0.0000])
  9592. """,
  9593. )
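# Editor's sketch (hypothetical helper, not part of the original file):
# reproduces the fake-quantization formula above with basic ops
# (torch.round rounds half to even, matching std::nearbyint's default mode).
def _sketch_fake_quantize_formula():
    import torch

    x = torch.tensor([0.06, 0.97, 0.40, -1.08])
    scale, zero_point, quant_min, quant_max = 0.1, 0, 0, 255
    manual = (
        torch.clamp(torch.round(x / scale) + zero_point, quant_min, quant_max)
        - zero_point
    ) * scale
    out = torch.fake_quantize_per_tensor_affine(
        x, scale, zero_point, quant_min, quant_max
    )
    assert torch.allclose(out, manual)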
  9594. add_docstr(
  9595. torch.fake_quantize_per_channel_affine,
  9596. r"""
  9597. fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
  9598. Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
  9599. :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
  9600. .. math::
  9601. \text{output} = (
  9602. min(
  9603. \text{quant\_max},
  9604. max(
  9605. \text{quant\_min},
  9606. \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
  9607. )
  9608. ) - \text{zero\_point}
  9609. ) \times \text{scale}
  9610. Args:
  9611. input (Tensor): the input value(s), in ``torch.float32``
  9612. scale (Tensor): quantization scale, per channel in ``torch.float32``
  9613. zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
  9614. axis (int32): channel axis
  9615. quant_min (int64): lower bound of the quantized domain
  9616. quant_max (int64): upper bound of the quantized domain
  9617. Returns:
  9618. Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
  9619. Example::
  9620. >>> x = torch.randn(2, 2, 2)
  9621. >>> x
  9622. tensor([[[-0.2525, -0.0466],
  9623. [ 0.3491, -0.2168]],
  9624. [[-0.5906, 1.6258],
  9625. [ 0.6444, -0.0542]]])
  9626. >>> scales = (torch.randn(2) + 1) * 0.05
  9627. >>> scales
  9628. tensor([0.0475, 0.0486])
  9629. >>> zero_points = torch.zeros(2).to(torch.int32)
  9630. >>> zero_points
  9631. tensor([0, 0])
  9632. >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
  9633. tensor([[[0.0000, 0.0000],
  9634. [0.3405, 0.0000]],
  9635. [[0.0000, 1.6134],
  9636. [0.6323, 0.0000]]])
  9637. """,
  9638. )
  9639. add_docstr(
  9640. torch.fix,
  9641. r"""
  9642. fix(input, *, out=None) -> Tensor
  9643. Alias for :func:`torch.trunc`
  9644. """,
  9645. )
  9646. add_docstr(
  9647. torch.unsqueeze,
  9648. r"""
  9649. unsqueeze(input, dim) -> Tensor
  9650. Returns a new tensor with a dimension of size one inserted at the
  9651. specified position.
  9652. The returned tensor shares the same underlying data with this tensor.
  9653. A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
  9654. can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
  9655. applied at :attr:`dim` = ``dim + input.dim() + 1``.
  9656. Args:
  9657. {input}
  9658. dim (int): the index at which to insert the singleton dimension
  9659. Example::
  9660. >>> x = torch.tensor([1, 2, 3, 4])
  9661. >>> torch.unsqueeze(x, 0)
  9662. tensor([[ 1, 2, 3, 4]])
  9663. >>> torch.unsqueeze(x, 1)
  9664. tensor([[ 1],
  9665. [ 2],
  9666. [ 3],
  9667. [ 4]])
  9668. """.format(
  9669. **common_args
  9670. ),
  9671. )
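# Editor's sketch (hypothetical helper, not part of the original file):
# negative dims follow the rule above: dim becomes dim + input.dim() + 1.
def _sketch_unsqueeze_negative_dim():
    import torch

    x = torch.zeros(2, 3)
    assert torch.unsqueeze(x, -1).shape == (2, 3, 1)  # -1 -> -1 + 2 + 1 = 2
    assert torch.equal(torch.unsqueeze(x, -1), torch.unsqueeze(x, 2))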
  9672. add_docstr(
  9673. torch.var,
  9674. r"""
  9675. var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
  9676. Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
  9677. can be a single dimension, list of dimensions, or ``None`` to reduce over all
  9678. dimensions.
  9679. The variance (:math:`\sigma^2`) is calculated as
  9680. .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
  9681. where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
  9682. sample mean, :math:`N` is the number of samples and :math:`\delta N` is
  9683. the :attr:`correction`.
  9684. """
  9685. + r"""
  9686. {keepdim_details}
  9687. Args:
  9688. {input}
  9689. {opt_dim}
  9690. Keyword args:
  9691. correction (int): difference between the sample size and sample degrees of freedom.
  9692. Defaults to `Bessel's correction`_, ``correction=1``.
  9693. .. versionchanged:: 2.0
  9694. Previously this argument was called ``unbiased`` and was a boolean
  9695. with ``True`` corresponding to ``correction=1`` and ``False`` being
  9696. ``correction=0``.
  9697. {keepdim}
  9698. {out}
  9699. Example:
  9700. >>> a = torch.tensor(
  9701. ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
  9702. ... [ 1.5027, -0.3270, 0.5905, 0.6538],
  9703. ... [-1.5745, 1.3330, -0.5596, -0.6548],
  9704. ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
  9705. >>> torch.var(a, dim=1, keepdim=True)
  9706. tensor([[1.0631],
  9707. [0.5590],
  9708. [1.4893],
  9709. [0.8258]])
  9710. .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
  9711. """.format(
  9712. **multi_dim_common
  9713. ),
  9714. )
add_docstr(
    torch.var_mean,
    r"""
var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)

Calculates the variance and mean over the dimensions specified by :attr:`dim`.
:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
reduce over all dimensions.

The variance (:math:`\sigma^2`) is calculated as

.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2

where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
the :attr:`correction`.
"""
    + r"""

{keepdim_details}

Args:
    {input}
    {opt_dim}

Keyword args:
    correction (int): difference between the sample size and sample degrees of freedom.
        Defaults to `Bessel's correction`_, ``correction=1``.

        .. versionchanged:: 2.0
            Previously this argument was called ``unbiased`` and was a boolean
            with ``True`` corresponding to ``correction=1`` and ``False`` being
            ``correction=0``.
    {keepdim}
    {out}

Returns:
    A tuple (var, mean) containing the variance and mean.

Example::

    >>> a = torch.tensor(
    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
    >>> torch.var_mean(a, dim=0, keepdim=True)
    (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
     tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))

.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
""".format(**multi_dim_common),
)
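
# A minimal doctest-style sketch (illustrative): torch.var_mean returns the
# same values as calling torch.var and torch.mean separately, in one pass.
#
#     >>> a = torch.randn(3, 5)
#     >>> v, m = torch.var_mean(a, dim=1)
#     >>> torch.allclose(v, torch.var(a, dim=1)) and torch.allclose(m, a.mean(dim=1))
#     True
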
add_docstr(
    torch.zeros,
    r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.zeros(2, 3)
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]])

    >>> torch.zeros(5)
    tensor([ 0.,  0.,  0.,  0.,  0.])
""".format(**factory_common_args),
)
add_docstr(
    torch.zeros_like,
    r"""
zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

.. warning::
    As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
    the old ``torch.zeros_like(input, out=output)`` is equivalent to
    ``torch.zeros(input.size(), out=output)``.

Args:
    {input}

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}

Example::

    >>> input = torch.empty(2, 3)
    >>> torch.zeros_like(input)
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]])
""".format(**factory_like_common_args),
)
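
# A minimal doctest-style sketch (illustrative): zeros_like preserves shape,
# dtype, and device of its input, matching the documented equivalence to
# torch.zeros.
#
#     >>> base = torch.randn(2, 3, dtype=torch.float64)
#     >>> z = torch.zeros_like(base)
#     >>> z.shape == base.shape and z.dtype == base.dtype and z.device == base.device
#     True
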
add_docstr(
    torch.empty,
    """
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
memory_format=torch.contiguous_format) -> Tensor

Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
        Can be a variable number of arguments or a collection like a list or tuple.

Keyword args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}
    {memory_format}

Example::

    >>> torch.empty((2, 3), dtype=torch.int64)
    tensor([[ 9.4064e+13,  2.8000e+01,  9.3493e+13],
            [ 7.5751e+18,  7.1428e+18,  7.5955e+18]])
""".format(**factory_common_args),
)
add_docstr(
    torch.empty_like,
    r"""
empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor

Returns an uninitialized tensor with the same size as :attr:`input`.
``torch.empty_like(input)`` is equivalent to
``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    {input}

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}

Example::

    >>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
    >>> torch.empty_like(a)
    tensor([[0, 0, 0],
            [0, 0, 0]], device='cuda:0', dtype=torch.int32)
""".format(**factory_like_common_args),
)
add_docstr(
    torch.empty_strided,
    r"""
empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor

Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.

.. warning::
    If the constructed tensor is "overlapped" (with multiple indices referring to the same element
    in memory) its behavior is undefined.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    size (tuple of int): the shape of the output tensor
    stride (tuple of int): the strides of the output tensor

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Example::

    >>> a = torch.empty_strided((2, 3), (1, 2))
    >>> a
    tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
            [0.0000e+00, 0.0000e+00, 3.0705e-41]])
    >>> a.stride()
    (1, 2)
    >>> a.size()
    torch.Size([2, 3])
""".format(**factory_common_args),
)
add_docstr(
    torch.empty_permuted,
    r"""
empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor

Creates an uninitialized, non-overlapping and dense tensor with the
specified :attr:`size`, with :attr:`physical_layout` specifying how the
dimensions are physically laid out in memory (each logical dimension is listed
from outermost to innermost). :attr:`physical_layout` is a generalization
of NCHW/NHWC notation: if each dimension is assigned a number according to
what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output
tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).

Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
tensor with no overlaps. If possible, prefer using this function over
:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.

.. note::
    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, the output tensor is initialized to prevent any possible
    nondeterministic behavior from using the data as an input to an operation.
    Floating point and complex tensors are filled with NaN, and integer tensors
    are filled with the maximum value.

Args:
    size (tuple of int): the shape of the output tensor
    physical_layout (tuple of int): the ordering of dimensions physically in memory

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {pin_memory}

Examples::

    >>> torch.empty((2, 3, 5, 7)).stride()
    (105, 35, 7, 1)
    >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
    (105, 35, 7, 1)
    >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
    (105, 1, 21, 3)
    >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
    (105, 1, 21, 3)
    >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order()
    (0, 2, 3, 1)
""".format(**factory_common_args),
)
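
# A minimal doctest-style sketch (illustrative): the ``(0, 2, 3, 1)`` physical
# layout from the examples above is exactly a channels-last (NHWC) allocation.
#
#     >>> t = torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1))
#     >>> t.is_contiguous(memory_format=torch.channels_last)
#     True
#     >>> t.shape
#     torch.Size([2, 3, 5, 7])
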
add_docstr(
    torch.full,
    r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor

Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    fill_value (Scalar): the value to fill the output tensor with.

Keyword args:
    {out}
    {dtype}
    {layout}
    {device}
    {requires_grad}

Example::

    >>> torch.full((2, 3), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416]])
""".format(**factory_common_args),
)
add_docstr(
    torch.full_like,
    """
full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor

Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.

Args:
    {input}
    fill_value: the number to fill the output tensor with.

Keyword args:
    {dtype}
    {layout}
    {device}
    {requires_grad}
    {memory_format}
""".format(**factory_like_common_args),
)
add_docstr(
    torch.det,
    r"""
det(input) -> Tensor

Alias for :func:`torch.linalg.det`
""",
)
add_docstr(
    torch.where,
    r"""
where(condition, input, other, *, out=None) -> Tensor

Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.

The operation is defined as:

.. math::
    \text{out}_i = \begin{cases}
        \text{input}_i & \text{if } \text{condition}_i \\
        \text{other}_i & \text{otherwise} \\
    \end{cases}
"""
    + r"""
.. note::
    The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.

Arguments:
    condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
    input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
        where :attr:`condition` is ``True``
    other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
        where :attr:`condition` is ``False``

Keyword args:
    {out}

Returns:
    Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`

Example::

    >>> x = torch.randn(3, 2)
    >>> y = torch.ones(3, 2)
    >>> x
    tensor([[-0.4620,  0.3139],
            [ 0.3898, -0.7197],
            [ 0.0478, -0.1657]])
    >>> torch.where(x > 0, 1.0, 0.0)
    tensor([[0., 1.],
            [1., 0.],
            [1., 0.]])
    >>> torch.where(x > 0, x, y)
    tensor([[ 1.0000,  0.3139],
            [ 0.3898,  1.0000],
            [ 0.0478,  1.0000]])
    >>> x = torch.randn(2, 2, dtype=torch.double)
    >>> x
    tensor([[ 1.0779,  0.0383],
            [-0.8785, -1.1089]], dtype=torch.float64)
    >>> torch.where(x > 0, x, 0.)
    tensor([[1.0779, 0.0383],
            [0.0000, 0.0000]], dtype=torch.float64)

.. function:: where(condition) -> tuple of LongTensor
   :noindex:

``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.

.. note::
    See also :func:`torch.nonzero`.
""".format(**common_args),
)
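
# A minimal doctest-style sketch (illustrative): the single-argument form
# documented above is the same as torch.nonzero with as_tuple=True.
#
#     >>> cond = torch.tensor([[True, False], [False, True]])
#     >>> torch.where(cond)
#     (tensor([0, 1]), tensor([0, 1]))
#     >>> torch.nonzero(cond, as_tuple=True)
#     (tensor([0, 1]), tensor([0, 1]))
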
add_docstr(
    torch.logdet,
    r"""
logdet(input) -> Tensor

Calculates log determinant of a square matrix or batches of square matrices.

It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
a negative determinant.

.. note::
    Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
    is not invertible. In this case, double backward through :meth:`logdet` will
    be unstable when :attr:`input` doesn't have distinct singular values. See
    :func:`torch.linalg.svd` for details.

.. seealso::

    :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
    absolute value of the determinant of real-valued (resp. complex) square matrices.

Arguments:
    input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
        batch dimensions.

Example::

    >>> A = torch.randn(3, 3)
    >>> torch.det(A)
    tensor(0.2611)
    >>> torch.logdet(A)
    tensor(-1.3430)
    >>> A = torch.randn(3, 2, 2)
    >>> A
    tensor([[[ 0.9254, -0.6213],
             [-0.5787,  1.6843]],

            [[ 0.3242, -0.9665],
             [ 0.4539, -0.0887]],

            [[ 1.1336, -0.4025],
             [-0.7089,  0.9032]]])
    >>> A.det()
    tensor([1.1990, 0.4099, 0.7386])
    >>> A.det().log()
    tensor([ 0.1815, -0.8917, -0.3031])
""",
)
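
# A minimal doctest-style sketch (illustrative): for a matrix with positive
# determinant, logdet agrees with the log of torch.linalg.det.
#
#     >>> A = torch.randn(3, 3)
#     >>> A = A @ A.mT + torch.eye(3)  # positive definite, so det(A) > 0
#     >>> torch.allclose(torch.logdet(A), torch.linalg.det(A).log())
#     True
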
add_docstr(
    torch.slogdet,
    r"""
slogdet(input) -> (Tensor, Tensor)

Alias for :func:`torch.linalg.slogdet`
""",
)
add_docstr(
    torch.pinverse,
    r"""
pinverse(input, rcond=1e-15) -> Tensor

Alias for :func:`torch.linalg.pinv`
""",
)
add_docstr(
    torch.hann_window,
    """
hann_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Hann window function.

.. math::
    w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
            \sin^2 \left( \frac{\pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args),
)
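
# A minimal doctest-style sketch (illustrative): the periodic/symmetric
# identity stated in the docstring above, checked numerically.
#
#     >>> L = 10
#     >>> torch.allclose(torch.hann_window(L, periodic=True),
#     ...                torch.hann_window(L + 1, periodic=False)[:-1])
#     True
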
add_docstr(
    torch.hamming_window,
    """
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Hamming window function.

.. math::
    w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.

.. note::
    This is a generalized version of :meth:`torch.hann_window`.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.
    alpha (float, optional): The coefficient :math:`\alpha` in the equation above
    beta (float, optional): The coefficient :math:`\beta` in the equation above

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window.
""".format(**factory_common_args),
)
add_docstr(
    torch.bartlett_window,
    """
bartlett_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Bartlett window function.

.. math::
    w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
        \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
        2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
    \end{cases},

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args),
)
add_docstr(
    torch.blackman_window,
    """
blackman_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Blackman window function.

.. math::
    w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right),

where :math:`N` is the full window size.

The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.

.. note::
    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
    + r"""
Arguments:
    window_length (int): the size of returned window
    periodic (bool, optional): If True, returns a window to be used as periodic
        function. If False, return a symmetric window.

Keyword args:
    {dtype} Only floating point types are supported.
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}

Returns:
    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args),
)
add_docstr(
    torch.kaiser_window,
    """
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
    + r"""
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.

Let :math:`I_0` be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:

.. math::
    out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )

Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.

.. note::
    If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
"""
    + r"""
Args:
    window_length (int): length of the window.
    periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
        If False, returns a symmetric window suitable for use in filter design.
    beta (float, optional): shape parameter for the window.

Keyword args:
    {dtype}
    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
        ``torch.strided`` (dense layout) is supported.
    {device}
    {requires_grad}
""".format(**factory_common_args),
)
add_docstr(
    torch.vander,
    """
vander(x, N=None, increasing=False) -> Tensor
"""
    + r"""
Generates a Vandermonde matrix.

The columns of the output matrix are elementwise powers of the input vector
:math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
If increasing is True, the order of the columns is reversed
:math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.

Arguments:
    x (Tensor): 1-D input tensor.
    N (int, optional): Number of columns in the output. If N is not specified,
        a square array is returned :math:`(N = len(x))`.
    increasing (bool, optional): Order of the powers of the columns. If True,
        the powers increase from left to right, if False (the default) they are reversed.

Returns:
    Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
    the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
    are :math:`x^0, x^1, ..., x^{{(N-1)}}`.

Example::

    >>> x = torch.tensor([1, 2, 3, 5])
    >>> torch.vander(x)
    tensor([[  1,   1,   1,   1],
            [  8,   4,   2,   1],
            [ 27,   9,   3,   1],
            [125,  25,   5,   1]])
    >>> torch.vander(x, N=3)
    tensor([[ 1,  1,  1],
            [ 4,  2,  1],
            [ 9,  3,  1],
            [25,  5,  1]])
    >>> torch.vander(x, N=3, increasing=True)
    tensor([[ 1,  1,  1],
            [ 1,  2,  4],
            [ 1,  3,  9],
            [ 1,  5, 25]])
""".format(**factory_common_args),
)
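
# A minimal doctest-style sketch (illustrative): building the same Vandermonde
# matrix by stacking decreasing elementwise powers of x.
#
#     >>> x = torch.tensor([1., 2., 3.])
#     >>> manual = torch.stack([x ** (2 - i) for i in range(3)], dim=1)
#     >>> torch.allclose(torch.vander(x), manual)
#     True
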
add_docstr(
    torch.unbind,
    r"""
unbind(input, dim=0) -> seq

Removes a tensor dimension.

Returns a tuple of all slices along a given dimension, already without it.

Arguments:
    input (Tensor): the tensor to unbind
    dim (int): dimension to remove

Example::

    >>> torch.unbind(torch.tensor([[1, 2, 3],
    ...                            [4, 5, 6],
    ...                            [7, 8, 9]]))
    (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
""",
)
add_docstr(
    torch.combinations,
    r"""
combinations(input, r=2, with_replacement=False) -> seq

Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.

Arguments:
    input (Tensor): 1D vector.
    r (int, optional): number of elements to combine
    with_replacement (bool, optional): whether to allow duplication in combination

Returns:
    Tensor: A tensor equivalent to converting the input tensor into a list,
    applying `itertools.combinations` or `itertools.combinations_with_replacement`
    to that list, and converting the resulting list into a tensor.

Example::

    >>> import itertools
    >>> a = [1, 2, 3]
    >>> list(itertools.combinations(a, r=2))
    [(1, 2), (1, 3), (2, 3)]
    >>> list(itertools.combinations(a, r=3))
    [(1, 2, 3)]
    >>> list(itertools.combinations_with_replacement(a, r=2))
    [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
    >>> tensor_a = torch.tensor(a)
    >>> torch.combinations(tensor_a)
    tensor([[1, 2],
            [1, 3],
            [2, 3]])
    >>> torch.combinations(tensor_a, r=3)
    tensor([[1, 2, 3]])
    >>> torch.combinations(tensor_a, with_replacement=True)
    tensor([[1, 1],
            [1, 2],
            [1, 3],
            [2, 2],
            [2, 3],
            [3, 3]])
""",
)
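
# A minimal doctest-style sketch (illustrative): torch.combinations matches
# itertools on the same elements, row for row.
#
#     >>> import itertools
#     >>> t = torch.tensor([1, 2, 3, 4])
#     >>> expected = torch.tensor(list(itertools.combinations(t.tolist(), r=2)))
#     >>> torch.equal(torch.combinations(t, r=2), expected)
#     True
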
add_docstr(
    torch.trapezoid,
    r"""
trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
:attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.

Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
the default computation is

.. math::
    \begin{aligned}
        \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
    \end{aligned}

When :attr:`dx` is specified the computation becomes

.. math::
    \begin{aligned}
        \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
    \end{aligned}

effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
assuming :attr:`x` is also a one-dimensional tensor with
elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes

.. math::
    \begin{aligned}
        \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
    \end{aligned}

When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
and :attr:`y`, the function computes the difference between consecutive elements along
dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1.
After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
See the examples below for details.

.. note::
    The trapezoidal rule is a technique for approximating the definite integral of a function
    by averaging its left and right Riemann sums. The approximation becomes more accurate as
    the resolution of the partition increases.

Arguments:
    y (Tensor): Values to use when computing the trapezoidal rule.
    x (Tensor): If specified, defines spacing between values as specified above.

Keyword arguments:
    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
        is specified then this defaults to 1. Effectively multiplies the result by its value.
    dim (int): The dimension along which to compute the trapezoidal rule.
        The last (inner-most) dimension by default.

Examples::

    >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
    >>> y = torch.tensor([1, 5, 10])
    >>> torch.trapezoid(y)
    tensor(10.5)

    >>> # Computes the same trapezoidal rule directly to verify
    >>> (1 + 10 + 10) / 2
    10.5

    >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
    >>> # NOTE: the result is the same as before, but multiplied by 2
    >>> torch.trapezoid(y, dx=2)
    tensor(21.0)

    >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.trapezoid(y, x)
    tensor(28.5)

    >>> # Computes the same trapezoidal rule directly to verify
    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
    28.5

    >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
    >>> y = torch.arange(9).reshape(3, 3)
    >>> y
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> torch.trapezoid(y)
    tensor([ 2.,  8., 14.])

    >>> # Computes the trapezoidal rule for each column of the matrix
    >>> torch.trapezoid(y, dim=0)
    tensor([ 6.,  8., 10.])

    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with the same arbitrary spacing
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.trapezoid(y, x)
    tensor([5., 5., 5.])

    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with different arbitrary spacing per row
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
    >>> torch.trapezoid(y, x)
    tensor([2., 4., 6.])
""",
)
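
# A minimal doctest-style sketch (illustrative): the arbitrary-spacing case
# above, verified against the summation formula from the docstring.
#
#     >>> y = torch.tensor([1., 5., 10.])
#     >>> x = torch.tensor([1., 3., 6.])
#     >>> manual = ((x[1:] - x[:-1]) * (y[1:] + y[:-1]) / 2).sum()
#     >>> torch.allclose(torch.trapezoid(y, x), manual)
#     True
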
add_docstr(
    torch.trapz,
    r"""
trapz(y, x, *, dim=-1) -> Tensor

Alias for :func:`torch.trapezoid`.
""",
)
add_docstr(
    torch.cumulative_trapezoid,
    r"""
cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor

Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.

For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
and this function is that :func:`torch.trapezoid` returns a value for each integration,
whereas this function returns a cumulative value for every spacing within the integration. This
is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.

Arguments:
    y (Tensor): Values to use when computing the trapezoidal rule.
    x (Tensor): If specified, defines spacing between values as specified above.

Keyword arguments:
    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
        is specified then this defaults to 1. Effectively multiplies the result by its value.
    dim (int): The dimension along which to compute the trapezoidal rule.
        The last (inner-most) dimension by default.

Examples::

    >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
    >>> y = torch.tensor([1, 5, 10])
    >>> torch.cumulative_trapezoid(y)
    tensor([3., 10.5])

    >>> # Computes the same trapezoidal rule directly up to each element to verify
    >>> (1 + 5) / 2
    3.0
    >>> (1 + 10 + 10) / 2
    10.5

    >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
    >>> # NOTE: the result is the same as before, but multiplied by 2
    >>> torch.cumulative_trapezoid(y, dx=2)
    tensor([6., 21.])

    >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.cumulative_trapezoid(y, x)
    tensor([6., 28.5])

    >>> # Computes the same trapezoidal rule directly up to each element to verify
    >>> ((3 - 1) * (1 + 5)) / 2
    6.0
    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
    28.5

    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
    >>> y = torch.arange(9).reshape(3, 3)
    >>> y
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> torch.cumulative_trapezoid(y)
    tensor([[ 0.5,  2.],
            [ 3.5,  8.],
            [ 6.5, 14.]])

    >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
    >>> torch.cumulative_trapezoid(y, dim=0)
    tensor([[ 1.5,  2.5,  3.5],
            [ 6.0,  8.0, 10.0]])

    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with the same arbitrary spacing
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([1, 3, 6])
    >>> torch.cumulative_trapezoid(y, x)
    tensor([[2., 5.],
            [2., 5.],
            [2., 5.]])

    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
    >>> # with different arbitrary spacing per row
    >>> y = torch.ones(3, 3)
    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
    >>> torch.cumulative_trapezoid(y, x)
    tensor([[1., 2.],
            [2., 4.],
            [3., 6.]])
""",
)
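
# A minimal doctest-style sketch (illustrative): the last cumulative value
# equals the full torch.trapezoid integral, mirroring the .cumsum / .sum
# analogy above.
#
#     >>> y = torch.tensor([1., 5., 10.])
#     >>> torch.allclose(torch.cumulative_trapezoid(y)[-1], torch.trapezoid(y))
#     True
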
add_docstr(
    torch.repeat_interleave,
    r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor

Repeat elements of a tensor.

.. warning::

    This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.

Args:
    {input}
    repeats (Tensor or int): The number of repetitions for each element.
        repeats is broadcasted to fit the shape of the given axis.
    dim (int, optional): The dimension along which to repeat values.
        By default, use the flattened input array, and return a flat output
        array.

Keyword args:
    output_size (int, optional): Total output size for the given axis
        (e.g. sum of repeats). If given, it will avoid stream synchronization
        needed to calculate output shape of the tensor.

Returns:
    Tensor: Repeated tensor which has the same shape as input, except along the given axis.

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.repeat_interleave(2)
    tensor([1, 1, 2, 2, 3, 3])
    >>> y = torch.tensor([[1, 2], [3, 4]])
    >>> torch.repeat_interleave(y, 2)
    tensor([1, 1, 2, 2, 3, 3, 4, 4])
    >>> torch.repeat_interleave(y, 3, dim=1)
    tensor([[1, 1, 1, 2, 2, 2],
            [3, 3, 3, 4, 4, 4]])
    >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
    tensor([[1, 2],
            [3, 4],
            [3, 4]])
    >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
    tensor([[1, 2],
            [3, 4],
            [3, 4]])

If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.

.. function:: repeat_interleave(repeats, *) -> Tensor
   :noindex:

Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.

Args:
    repeats (Tensor): The number of repetitions for each element.

Returns:
    Tensor: Repeated tensor of size `sum(repeats)`.

Example::

    >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
    tensor([0, 1, 1, 2, 2, 2])
""".format(**common_args),
)
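
# A minimal doctest-style sketch (illustrative): the single-argument overload
# documented above is the same as repeating torch.arange(len(repeats))
# elementwise.
#
#     >>> repeats = torch.tensor([1, 2, 3])
#     >>> torch.equal(torch.repeat_interleave(repeats),
#     ...             torch.repeat_interleave(torch.arange(3), repeats))
#     True
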
add_docstr(
    torch.tile,
    r"""
tile(input, dims) -> Tensor

Constructs a tensor by repeating the elements of :attr:`input`.
The :attr:`dims` argument specifies the number of repetitions
in each dimension.

If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
ones are prepended to :attr:`dims` until all dimensions are specified.
For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).

Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
specifies, then :attr:`input` is treated as if it were unsqueezed at
dimension zero until it has as many dimensions as :attr:`dims` specifies.
For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
shape (1, 1, 4, 2).

.. note::

    This function is similar to NumPy's tile function.

Args:
    input (Tensor): the tensor whose elements to repeat.
    dims (tuple): the number of repetitions per dimension.

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.tile((2,))
    tensor([1, 2, 3, 1, 2, 3])
    >>> y = torch.tensor([[1, 2], [3, 4]])
    >>> torch.tile(y, (2, 2))
    tensor([[1, 2, 1, 2],
            [3, 4, 3, 4],
            [1, 2, 1, 2],
            [3, 4, 3, 4]])
""",
)
add_docstr(
    torch.quantize_per_tensor,
    r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor

Converts a float tensor to a quantized tensor with given scale and zero point.

Arguments:
    input (Tensor): float tensor or list of tensors to quantize
    scale (float or Tensor): scale to apply in quantization formula
    zero_point (int or Tensor): offset in integer value that maps to float zero
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

Returns:
    Tensor: A newly quantized tensor or list of quantized tensors.

Example::

    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
    tensor([ 0, 10, 20, 30], dtype=torch.uint8)
    >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
    ...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
    (tensor([-1.,  0.], size=(2,), dtype=torch.quint8,
        quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
     tensor([-2.,  2.], size=(2,), dtype=torch.quint8,
        quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
""",
)
add_docstr(
    torch.quantize_per_tensor_dynamic,
    r"""
quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor

Converts a float tensor to a quantized tensor with scale and zero_point calculated
dynamically based on the input.

Arguments:
    input (Tensor): float tensor or list of tensors to quantize
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
    reduce_range (bool): a flag to indicate whether to reduce the range of quantized
        data by 1 bit; this is required to avoid instruction overflow on some hardware

Returns:
    Tensor: A newly (dynamically) quantized tensor

Example::

    >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
    >>> print(t)
    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
           zero_point=85)
    >>> t.int_repr()
    tensor([  0,  85, 170, 255], dtype=torch.uint8)
""",
)
add_docstr(
    torch.quantize_per_channel,
    r"""
quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor

Converts a float tensor to a per-channel quantized tensor with given scales and zero points.

Arguments:
    input (Tensor): float tensor to quantize
    scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
    zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
    axis (int): dimension on which to apply per-channel quantization
    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``

Returns:
    Tensor: A newly quantized tensor

Example::

    >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
    >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
    tensor([[-1.,  0.],
            [ 1.,  2.]], size=(2, 2), dtype=torch.quint8,
           quantization_scheme=torch.per_channel_affine,
           scale=tensor([0.1000, 0.0100], dtype=torch.float64),
           zero_point=tensor([10,  0]), axis=0)
    >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
    tensor([[  0,  10],
            [100, 200]], dtype=torch.uint8)
""",
)
add_docstr(
    torch.quantized_batch_norm,
    r"""
quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor

Applies batch normalization on a 4D (NCHW) quantized tensor.

.. math::

    y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

Arguments:
    input (Tensor): quantized tensor
    weight (Tensor): float tensor that corresponds to the gamma, size C
    bias (Tensor): float tensor that corresponds to the beta, size C
    mean (Tensor): float mean value in batch normalization, size C
    var (Tensor): float tensor for variance, size C
    eps (float): a value added to the denominator for numerical stability.
    output_scale (float): output quantized tensor scale
    output_zero_point (int): output quantized tensor zero_point

Returns:
    Tensor: A quantized tensor with batch normalization applied.

Example::

    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
    >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
    tensor([[[[-0.2000, -0.2000],
              [ 1.6000, -0.2000]],

             [[-0.4000, -0.4000],
              [-0.4000,  0.6000]]],

            [[[-0.2000, -0.2000],
              [-0.2000, -0.2000]],

             [[ 0.6000, -0.4000],
              [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
""",
)
add_docstr(
    torch.quantized_max_pool1d,
    r"""
quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor

Applies a 1D max pooling over an input quantized tensor composed of several input planes.

Arguments:
    input (Tensor): quantized tensor
    kernel_size (``list of int``): the size of the sliding window
    stride (``list of int``, optional): the stride of the sliding window
    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
    dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
    ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
        Defaults to False.

Returns:
    Tensor: A quantized tensor with max_pool1d applied.

Example::

    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
    >>> torch.quantized_max_pool1d(qx, [2])
    tensor([[0.0000],
            [1.5000]], size=(2, 1), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""",
)
add_docstr(
    torch.quantized_max_pool2d,
    r"""
quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor

Applies a 2D max pooling over an input quantized tensor composed of several input planes.

Arguments:
    input (Tensor): quantized tensor
    kernel_size (``list of int``): the size of the sliding window
    stride (``list of int``, optional): the stride of the sliding window
    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
    dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
    ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
        Defaults to False.

Returns:
    Tensor: A quantized tensor with max_pool2d applied.

Example::

    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
    >>> torch.quantized_max_pool2d(qx, [2, 2])
    tensor([[[[1.5000]],

             [[1.5000]]],

            [[[0.0000]],

             [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
           quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""",
)
add_docstr(
    torch.Generator,
    r"""
Generator(device='cpu') -> Generator

Creates and returns a generator object that manages the state of the algorithm which
produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
functions.

Arguments:
    device (:class:`torch.device`, optional): the desired device for the generator.

Returns:
    Generator: A torch.Generator object.

Example::

    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
    >>> g_cpu = torch.Generator()
    >>> g_cuda = torch.Generator(device='cuda')
""",
)
add_docstr(
    torch.Generator.set_state,
    r"""
Generator.set_state(new_state) -> void

Sets the Generator state.

Arguments:
    new_state (torch.ByteTensor): The desired state.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu_other = torch.Generator()
    >>> g_cpu.set_state(g_cpu_other.get_state())
""",
)
add_docstr(
    torch.Generator.get_state,
    r"""
Generator.get_state() -> Tensor

Returns the Generator state as a ``torch.ByteTensor``.

Returns:
    Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
    to restore a Generator to a specific point in time.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.get_state()
""",
)
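
# A minimal doctest-style sketch (illustrative): get_state/set_state
# round-trips a generator, so a saved state replays the same random stream.
#
#     >>> g = torch.Generator().manual_seed(42)
#     >>> state = g.get_state()
#     >>> first = torch.randn(3, generator=g)
#     >>> g.set_state(state)
#     >>> torch.equal(torch.randn(3, generator=g), first)
#     True
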
add_docstr(
    torch.Generator.graphsafe_set_state,
    r"""
Generator.graphsafe_set_state(state) -> None

Sets the state of the generator to the specified state in a manner that is safe for use in graph capture.
This method is crucial for ensuring that the generator's state can be captured in the CUDA graph.

Arguments:
    state (torch.Generator): A Generator pointing to the new state for the generator,
        typically obtained from `graphsafe_get_state`.

Example::

    >>> g_cuda = torch.Generator(device='cuda')
    >>> g_cuda_other = torch.Generator(device='cuda')
    >>> current_state = g_cuda_other.graphsafe_get_state()
    >>> g_cuda.graphsafe_set_state(current_state)
""",
)
add_docstr(
    torch.Generator.graphsafe_get_state,
    r"""
Generator.graphsafe_get_state() -> torch.Generator

Retrieves the current state of the generator in a manner that is safe for graph capture.
This method is crucial for ensuring that the generator's state can be captured in the CUDA graph.

Returns:
    torch.Generator: A Generator pointing to the current state of the generator

Example::

    >>> g_cuda = torch.Generator(device='cuda')
    >>> current_state = g_cuda.graphsafe_get_state()
""",
)
add_docstr(
    torch.Generator.clone_state,
    r"""
Generator.clone_state() -> torch.Generator

Clones the current state of the generator and returns a new generator pointing to this cloned state.
This method is beneficial for preserving a particular state of a generator to restore at a later point.

Returns:
    torch.Generator: A Generator pointing to the newly cloned state.

Example::

    >>> g_cuda = torch.Generator(device='cuda')
    >>> cloned_state = g_cuda.clone_state()
""",
)
add_docstr(
    torch.Generator.manual_seed,
    r"""
Generator.manual_seed(seed) -> Generator

Sets the seed for generating random numbers and returns the `torch.Generator` object.

Arguments:
    seed (int): The desired seed. Value must be within the inclusive range
        `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
        is raised. Negative inputs are remapped to positive values with the formula
        `0xffff_ffff_ffff_ffff + seed`.

Returns:
    Generator: A torch.Generator object.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.manual_seed(2147483647)
""",
)
add_docstr(
    torch.Generator.initial_seed,
    r"""
Generator.initial_seed() -> int

Returns the initial seed for generating random numbers.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.initial_seed()
    2147483647
""",
)
add_docstr(
    torch.Generator.seed,
    r"""
Generator.seed() -> int

Gets a non-deterministic random number from std::random_device or the current
time and uses it to seed a Generator.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.seed()
    1516516984916
""",
)
add_docstr(
    torch.Generator.device,
    r"""
Generator.device -> device

Gets the current device of the generator.

Example::

    >>> g_cpu = torch.Generator()
    >>> g_cpu.device
    device(type='cpu')
""",
)
add_docstr(
    torch._assert_async,
    r"""
_assert_async(tensor) -> void

Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
CUDA tensors, we DO NOT synchronize and you may only find out the assertion
failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
testing invariants in CUDA tensors without giving up performance. This function
is NOT intended to be used for regular error checking, as it will trash your CUDA
context if the assert fails (forcing you to restart your PyTorch process).

Args:
    tensor (Tensor): a one-element tensor to test to see if it is nonzero. Zero
        elements (including False for boolean tensors) cause an assertion failure
        to be raised.
""",
)
add_docstr(
    torch.searchsorted,
    r"""
searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor

Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. More formally,
the returned index satisfies the following rules:

.. list-table::
   :widths: 12 10 78
   :header-rows: 1

   * - :attr:`sorted_sequence`
     - :attr:`right`
     - *returned index satisfies*
   * - 1-D
     - False
     - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
   * - 1-D
     - True
     - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
   * - N-D
     - False
     - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
   * - N-D
     - True
     - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``

Args:
    sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
        dimension unless :attr:`sorter` is provided, in which case the sequence does not
        need to be sorted
    values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).

Keyword args:
    out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
        Default value is False, i.e. default output data type is torch.int64.
    right (bool, optional): if False, return the first suitable location that is found. If True, return the
        last such index. If no suitable index is found, return 0 for non-numerical values
        (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
        (one past the last index of the *innermost* dimension). In other words, if False,
        gets the lower bound index for each value in :attr:`values` on the corresponding
        *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
        bound index instead. Default value is False. :attr:`side` does the same and is
        preferred. It will error if :attr:`side` is set to "left" while this is True.
    side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
        and "right" corresponds to True for :attr:`right`. It will error if this is set to
        "left" while :attr:`right` is True. Default value is None.
    out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
    sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
        :attr:`sorted_sequence` containing a sequence of indices that sort it in
        ascending order on the innermost dimension

Example::

    >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
    >>> sorted_sequence
    tensor([[ 1,  3,  5,  7,  9],
            [ 2,  4,  6,  8, 10]])
    >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
    >>> values
    tensor([[3, 6, 9],
            [3, 6, 9]])
    >>> torch.searchsorted(sorted_sequence, values)
    tensor([[1, 3, 4],
            [1, 2, 4]])
    >>> torch.searchsorted(sorted_sequence, values, side='right')
    tensor([[2, 3, 5],
            [1, 3, 4]])

    >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
    >>> sorted_sequence_1d
    tensor([1, 3, 5, 7, 9])
    >>> torch.searchsorted(sorted_sequence_1d, values)
    tensor([[1, 3, 4],
            [1, 3, 4]])
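
A minimal sketch of :attr:`sorter` with an unsorted sequence (the indices here
are assumed to come from :func:`torch.argsort`)::

    >>> unsorted = torch.tensor([5, 1, 9, 3, 7])
    >>> sorter = torch.argsort(unsorted)
    >>> torch.searchsorted(unsorted, torch.tensor([4]), sorter=sorter)
    tensor([2])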
""",
)
add_docstr(
    torch.bucketize,
    r"""
bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor

Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
this behavior is the opposite of the behavior of
`numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.
More formally, the returned index satisfies the following rules:

.. list-table::
   :widths: 15 85
   :header-rows: 1

   * - :attr:`right`
     - *returned index satisfies*
   * - False
     - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
   * - True
     - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``

Args:
    input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
    boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.

Keyword args:
    out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
        Default value is False, i.e. default output data type is torch.int64.
    right (bool, optional): if False, return the first suitable location that is found. If True, return the
        last such index. If no suitable index is found, return 0 for non-numerical values
        (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
        In other words, if False, gets the lower bound index for each value in :attr:`input`
        from :attr:`boundaries`. If True, gets the upper bound index instead.
        Default value is False.
    out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.

Example::

    >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
    >>> boundaries
    tensor([1, 3, 5, 7, 9])
    >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
    >>> v
    tensor([[3, 6, 9],
            [3, 6, 9]])
    >>> torch.bucketize(v, boundaries)
    tensor([[1, 3, 4],
            [1, 3, 4]])
    >>> torch.bucketize(v, boundaries, right=True)
    tensor([[2, 3, 5],
            [2, 3, 5]])
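
A scalar :attr:`input` is also accepted (a minimal sketch; the result is
assumed to be returned as a zero-dimensional tensor)::

    >>> torch.bucketize(6, boundaries)
    tensor(3)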
""",
)
add_docstr(
    torch.view_as_real_copy,
    r"""
Performs the same operation as :func:`torch.view_as_real`, but all output tensors
are freshly created instead of aliasing the input.
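
Example (a minimal sketch contrasting the copy with the aliasing :func:`torch.view_as_real`)::

    >>> z = torch.tensor([1 + 2j])
    >>> r = torch.view_as_real_copy(z)
    >>> r
    tensor([[1., 2.]])
    >>> r[0, 0] = 5.
    >>> z  # unchanged, because ``r`` is a fresh copy, not a view
    tensor([1.+2.j])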
""",
)
add_docstr(
    torch.view_as_complex_copy,
    r"""
Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.as_strided_copy,
    r"""
Performs the same operation as :func:`torch.as_strided`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.diagonal_copy,
    r"""
Performs the same operation as :func:`torch.diagonal`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.expand_copy,
    r"""
Performs the same operation as :func:`torch.expand`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.permute_copy,
    r"""
Performs the same operation as :func:`torch.permute`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.select_copy,
    r"""
Performs the same operation as :func:`torch.select`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.detach_copy,
    r"""
Performs the same operation as :func:`torch.detach`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.slice_copy,
    r"""
Performs the same operation as :func:`torch.slice`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.split_copy,
    r"""
Performs the same operation as :func:`torch.split`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.split_with_sizes_copy,
    r"""
Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.squeeze_copy,
    r"""
Performs the same operation as :func:`torch.squeeze`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.t_copy,
    r"""
Performs the same operation as :func:`torch.t`, but all output tensors
are freshly created instead of aliasing the input.
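
Example (a minimal sketch contrasting the copy with the aliasing :func:`torch.t`)::

    >>> x = torch.tensor([[1, 2], [3, 4]])
    >>> y = torch.t_copy(x)
    >>> y[0, 0] = 9
    >>> x[0, 0]  # unchanged, because ``y`` is a fresh copy, not a view
    tensor(1)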
""",
)
add_docstr(
    torch.transpose_copy,
    r"""
Performs the same operation as :func:`torch.transpose`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.unsqueeze_copy,
    r"""
Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.indices_copy,
    r"""
Performs the same operation as :func:`torch.indices`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.values_copy,
    r"""
Performs the same operation as :func:`torch.values`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.crow_indices_copy,
    r"""
Performs the same operation as :func:`torch.crow_indices`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.col_indices_copy,
    r"""
Performs the same operation as :func:`torch.col_indices`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.unbind_copy,
    r"""
Performs the same operation as :func:`torch.unbind`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.view_copy,
    r"""
Performs the same operation as :func:`torch.view`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.unfold_copy,
    r"""
Performs the same operation as :func:`torch.unfold`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
    torch.alias_copy,
    r"""
Performs the same operation as :func:`torch.alias`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
for unary_base_func_name in (
    "exp",
    "sqrt",
    "abs",
    "acos",
    "asin",
    "atan",
    "ceil",
    "cos",
    "cosh",
    "erf",
    "erfc",
    "expm1",
    "floor",
    "log",
    "log10",
    "log1p",
    "log2",
    "neg",
    "tan",
    "tanh",
    "sin",
    "sinh",
    "round",
    "lgamma",
    "frac",
    "reciprocal",
    "sigmoid",
    "trunc",
    "zero",
):
    unary_foreach_func_name = f"_foreach_{unary_base_func_name}"
    if hasattr(torch, unary_foreach_func_name):
        add_docstr(
            getattr(torch, unary_foreach_func_name),
            rf"""
{unary_foreach_func_name}(self: List[Tensor]) -> List[Tensor]

Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list.
""",
        )
    unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_"
    if hasattr(torch, unary_inplace_foreach_func_name):
        add_docstr(
            getattr(torch, unary_inplace_foreach_func_name),
            rf"""
{unary_inplace_foreach_func_name}(self: List[Tensor]) -> None

Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list, in place.
""",
        )
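# A minimal usage sketch of the unary foreach ops documented above (outputs
# shown are illustrative, rounded to the default print precision): each op maps
# its base function over a list of tensors in a single call.
#
#   >>> xs = [torch.tensor([0.0]), torch.tensor([1.0])]
#   >>> torch._foreach_exp(xs)   # out-of-place: returns a new list of tensors
#   [tensor([1.]), tensor([2.7183])]
#   >>> torch._foreach_exp_(xs)  # in-place variant: mutates each tensor in xs
#   >>> xs
#   [tensor([1.]), tensor([2.7183])]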