814309143101431114312143131431414315143161431714318143191432014321143221432314324143251432614327143281432914330143311433214333143341433514336143371433814339143401434114342143431434414345143461434714348143491435014351143521435314354143551435614357143581435914360143611436214363143641436514366143671436814369143701437114372143731437414375143761437714378143791438014381143821438314384143851438614387143881438914390143911439214393143941439514396143971439814399144001440114402144031440414405144061440714408144091441014411144121441314414144151441614417144181441914420144211442214423144241442514426144271442814429144301443114432144331443414435144361443714438144391444014441144421444314444144451444614447144481444914450144511445214453144541445514456144571445814459144601446114462144631446414465144661446714468144691447014471144721447314474144751447614477144781447914480144811448214483144841448514486144871448814489144901449114492144931449414495144961449714498144991450014501145021450314504145051450614507145081450914510145111451214513145141451514516145171451814519145201452114522145231452414525145261452714528145291453014531145321453314534145351453614537145381453914540145411454214543145441454514546145471454814549145501455114552145531455414555145561455714558145591456014561145621456314564145651456614567145681456914570145711457214573145741457514576145771457814579145801458114582145831458414585145861458714588145891459014591145921459314594145951459614597145981459914600146011460214603146041460514606146071460814609146101461114612146131461414615146161461714618146191462014621146221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501
915020150211502215023150241502515026150271502815029150301503115032150331503415035150361503715038150391504015041150421504315044150451504615047150481504915050150511505215053150541505515056150571505815059150601506115062150631506415065150661506715068150691507015071150721507315074150751507615077150781507915080150811508215083150841508515086150871508815089150901509115092150931509415095150961509715098150991510015101151021510315104151051510615107151081510915110151111511215113151141511515116151171511815119151201512115122151231512415125151261512715128151291513015131151321513315134151351513615137151381513915140151411514215143151441514515146151471514815149151501515115152151531515415155151561515715158151591516015161151621516315164151651516615167151681516915170151711517215173151741517515176151771517815179151801518115182151831518415185151861518715188151891519015191151921519315194151951519615197151981519915200152011520215203152041520515206152071520815209152101521115212152131521415215152161521715218152191522015221152221522315224152251522615227152281522915230152311523215233152341523515236152371523815239152401524115242152431524415245152461524715248152491525015251152521525315254152551525615257152581525915260152611526215263152641526515266152671526815269152701527115272152731527415275152761527715278152791528015281152821528315284152851528615287152881528915290152911529215293152941529515296152971529815299153001530115302153031530415305153061530715308153091531015311153121531315314153151531615317153181531915320153211532215323153241532515326153271532815329153301533115332153331533415335153361533715338153391534015341153421534315344153451534615347153481534915350153511535215353153541535515356153571535815359153601536115362153631536415365153661536715368153691537015371153721537315374153751537615377153781537915380153811538215383153841538515386153871538815389153901539115392153931539415395153961539715398153991540015401154021540315404154051540615407154081540915410154111541215413154141541515416154171541815419154201542115422154231542415425154261542715428154291543015431154321543315434154351543615437154381543915440154411544215443154441544515446154471544815449154501545115452154531545415455154561545715458154591546015461154621546315464154651546615467154681546915470154711547215473154741547515476154771547815479154801548115482154831548415485154861548715488154891549015491154921549315494154951549615497154981549915500155011550215503155041550515506155071550815509155101551115512155131551415515155161551715518155191552015521155221552315524155251552615527155281552915530155311553215533155341553515536155371553815539155401554115542155431554415545155461554715548155491555015551155521555315554155551555615557155581555915560155611556215563155641556515566155671556815569155701557115572155731557415575155761557715578155791558015581155821558315584155851558615587155881558915590155911559215593155941559515596155971559815599156001560115602156031560415605156061560715608156091561015611156121561315614156151561615617156181561915620156211562215623156241562515626156271562815629156301563115632156331563415635156361563715638156391564015641156421564315644156451564615647156481564915650156511565215653156541565515656156571565815659156601566115662156631566415665156661566715668156691567015671156721567315674156751567615677156781567915680156811568215683156841568515686156871568815689156901569115692156931569415695156961569715698156991570015701157021570315704157051570615707157081570915710157111571215713157141571515716157171571815719157201572115722157231572415725157261572715728157291573
015731157321573315734157351573615737157381573915740157411574215743157441574515746157471574815749157501575115752157531575415755157561575715758157591576015761157621576315764157651576615767157681576915770157711577215773157741577515776157771577815779157801578115782157831578415785157861578715788157891579015791157921579315794157951579615797157981579915800158011580215803158041580515806158071580815809158101581115812158131581415815158161581715818158191582015821158221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211622216223162241622516226162271622816229162301623116232162331623416235162361623716238162391624016241162421624316244162451624616247162481624916250162511625216253162541625516256162571625816259162601626116262162631626416265162661626716268162691627016271162721627316274162751627616277162781627916280162811628216283162841628516286162871628816289162901629116292162931629416295162961629716298162991630016301163021630316304163051630616307163081630916310163111631216313163141631516316163171631816319163201632116322163231632416325163261632716328163291633016331163321633316334163351633616337163381633916340163411634216343163441634516346163471634816349163501635116352163531635416355163561635716358163591636016361163621636316364163651636616367163681636916370163711637216373163741637516376163771637816379163801638116382163831638416385163861638716388163891639016391163921639316394163951639616397163981639916400164011640216403164041640516406164071640816409164101641116412164131641416415164161641716418164191642016421164221642316424164251642616427164281642916430164311643216433164341643516436164371643816439164401644
116442164431644416445164461644716448164491645016451164521645316454164551645616457164581645916460164611646216463164641646516466164671646816469164701647116472164731647416475164761647716478164791648016481164821648316484164851648616487164881648916490164911649216493164941649516496164971649816499165001650116502165031650416505165061650716508165091651016511165121651316514165151651616517165181651916520165211652216523165241652516526165271652816529165301653116532165331653416535165361653716538165391654016541165421654316544165451654616547165481654916550165511655216553165541655516556165571655816559165601656116562165631656416565165661656716568165691657016571165721657316574165751657616577165781657916580165811658216583165841658516586165871658816589165901659116592165931659416595165961659716598165991660016601166021660316604166051660616607166081660916610166111661216613166141661516616166171661816619166201662116622166231662416625166261662716628166291663016631166321663316634166351663616637166381663916640166411664216643166441664516646166471664816649166501665116652166531665416655166561665716658166591666016661166621666316664166651666616667166681666916670166711667216673166741667516676166771667816679166801668116682166831668416685166861668716688166891669016691166921669316694166951669616697166981669916700167011670216703167041670516706167071670816709167101671116712167131671416715167161671716718167191672016721167221672316724167251672616727167281672916730167311673216733167341673516736167371673816739167401674116742167431674416745167461674716748167491675016751167521675316754167551675616757167581675916760167611676216763167641676516766167671676816769167701677116772167731677416775167761677716778167791678016781167821678316784167851678616787167881678916790167911679216793167941679516796167971679816799168001680116802168031680416805168061680716808168091681016811168121681316814168151681616817168181681916820168211682216823168241682516826168271682816829168301683116832168331683416835168361683716838168391684016841168421684316844168451684616847168481684916850168511685216853168541685516856168571685816859168601686116862168631686416865168661686716868168691687016871168721687316874168751687616877168781687916880168811688216883168841688516886168871688816889168901689116892168931689416895168961689716898168991690016901169021690316904169051690616907169081690916910169111691216913169141691516916169171691816919169201692116922169231692416925169261692716928169291693016931169321693316934169351693616937169381693916940169411694216943169441694516946169471694816949169501695116952169531695416955169561695716958169591696016961169621696316964169651696616967169681696916970169711697216973169741697516976169771697816979169801698116982169831698416985169861698716988169891699016991169921699316994169951699616997169981699917000170011700217003170041700517006170071700817009170101701117012170131701417015170161701717018170191702017021170221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715
217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211742217423174241742517426174271742817429174301743117432174331743417435174361743717438174391744017441174421744317444174451744617447174481744917450174511745217453174541745517456174571745817459174601746117462174631746417465174661746717468174691747017471174721747317474174751747617477174781747917480174811748217483174841748517486174871748817489174901749117492174931749417495174961749717498174991750017501175021750317504175051750617507175081750917510175111751217513175141751517516175171751817519175201752117522175231752417525175261752717528175291753017531175321753317534175351753617537175381753917540175411754217543175441754517546175471754817549175501755117552175531755417555175561755717558175591756017561175621756317564175651756617567175681756917570175711757217573175741757517576175771757817579175801758117582175831758417585175861758717588175891759017591175921759317594175951759617597175981759917600176011760217603176041760517606176071760817609176101761117612176131761417615176161761717618176191762017621176221762317624176251762617627176281762917630176311763217633176341763517636176371763817639176401764117642176431764417645176461764717648176491765017651176521765317654176551765617657176581765917660176611766217663176641766517666176671766817669176701767117672176731767417675176761767717678176791768017681176821768317684176851768617687176881768917690176911769217693176941769517696176971769817699177001770117702177031770417705177061770717708177091771017711177121771317714177151771617717177181771917720177211772217723177241772517726177271772817729177301773117732177331773417735177361773717738177391774017741177421774317744177451774617747177481774917750177511775217753177541775517756177571775817759177601776117762177631776417765177661776717768177691777017771177721777317774177751777617777177781777917780177811778217783177841778517786177871778817789177901779117792177931779417795177961779717798177991780017801178021780317804178051780617807178081780917810178111781217813178141781517816178171781817819178201782117822178231782417825178261782717828178291783017831178321783317834178351783617837178381783917840178411784217843178441784517846178471784817849178501785117852178531785417855178561785717858178591786017861178621786
317864178651786617867178681786917870178711787217873178741787517876178771787817879178801788117882178831788417885178861788717888178891789017891178921789317894178951789617897178981789917900179011790217903179041790517906179071790817909179101791117912179131791417915179161791717918179191792017921179221792317924179251792617927179281792917930179311793217933179341793517936179371793817939179401794117942179431794417945179461794717948179491795017951179521795317954179551795617957179581795917960179611796217963179641796517966179671796817969179701797117972179731797417975179761797717978179791798017981179821798317984179851798617987179881798917990179911799217993179941799517996179971799817999180001800118002180031800418005180061800718008180091801018011180121801318014180151801618017180181801918020180211802218023180241802518026180271802818029180301803118032180331803418035180361803718038180391804018041180421804318044180451804618047180481804918050180511805218053180541805518056180571805818059180601806118062180631806418065180661806718068180691807018071180721807318074180751807618077180781807918080180811808218083180841808518086180871808818089180901809118092180931809418095180961809718098180991810018101181021810318104181051810618107181081810918110181111811218113181141811518116181171811818119181201812118122181231812418125181261812718128181291813018131181321813318134181351813618137181381813918140181411814218143181441814518146181471814818149181501815118152181531815418155181561815718158181591816018161181621816318164181651816618167181681816918170181711817218173181741817518176181771817818179181801818118182181831818418185181861818718188181891819018191181921819318194181951819618197181981819918200182011820218203182041820518206182071820818209182101821118212182131821418215182161821718218182191822018221182221822318224182251822618227182281822918230182311823218233182341823518236182371823818239182401824118242182431824418245182461824718248182491825018251182521825318254182551825618257182581825918260182611826218263182641826518266182671826818269182701827118272182731827418275182761827718278182791828018281182821828318284182851828618287182881828918290182911829218293182941829518296182971829818299183001830118302183031830418305183061830718308183091831018311183121831318314183151831618317183181831918320183211832218323183241832518326183271832818329183301833118332183331833418335183361833718338183391834018341183421834318344183451834618347183481834918350183511835218353183541835518356183571835818359183601836118362183631836418365183661836718368183691837018371183721837318374183751837618377183781837918380183811838218383183841838518386183871838818389183901839118392183931839418395183961839718398183991840018401184021840318404184051840618407184081840918410184111841218413184141841518416184171841818419184201842118422184231842418425184261842718428184291843018431184321843318434184351843618437184381843918440184411844218443184441844518446184471844818449184501845118452184531845418455184561845718458184591846018461184621846318464184651846618467184681846918470184711847218473184741847518476184771847818479184801848118482184831848418485184861848718488184891849018491184921849318494184951849618497184981849918500185011850218503185041850518506185071850818509185101851118512185131851418515185161851718518185191852018521185221852318524185251852618527185281852918530185311853218533185341853518536185371853818539185401854118542185431854418545185461854718548185491855018551185521855318554185551855618557185581855918560185611856218563185641856518566185671856818569185701857118572185731857
418575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211862218623186241862518626186271862818629186301863118632186331863418635186361863718638186391864018641186421864318644186451864618647186481864918650186511865218653186541865518656186571865818659186601866118662186631866418665186661866718668186691867018671186721867318674186751867618677186781867918680186811868218683186841868518686186871868818689186901869118692186931869418695186961869718698186991870018701187021870318704187051870618707187081870918710187111871218713187141871518716187171871818719187201872118722187231872418725187261872718728187291873018731187321873318734187351873618737187381873918740187411874218743187441874518746187471874818749187501875118752187531875418755187561875718758187591876018761187621876318764187651876618767187681876918770187711877218773187741877518776187771877818779187801878118782187831878418785187861878718788187891879018791187921879318794187951879618797187981879918800188011880218803188041880518806188071880818809188101881118812188131881418815188161881718818188191882018821188221882318824188251882618827188281882918830188311883218833188341883518836188371883818839188401884118842188431884418845188461884718848188491885018851188521885318854188551885618857188581885918860188611886218863188641886518866188671886818869188701887118872188731887418875188761887718878188791888018881188821888318884188851888618887188881888918890188911889218893188941889518896188971889818899189001890118902189031890418905189061890718908189091891018911189121891318914189151891618917189181891918920189211892218923189241892518926189271892818929189301893118932189331893418935189361893718938189391894018941189421894318944189451894618947189481894918950189511895218953189541895518956189571895818959189601896118962189631896418965189661896718968189691897018971189721897318974189751897618977189781897918980189811898218983189841898518986189871898818989189901899118992189931899418995189961899718998189991900019001190021900319004190051900619007190081900919010190111901219013190141901519016190171901819019190201902119022190231902419025190261902719028190291903019031190321903319034190351903619037190381903919040190411904219043190441904519046190471904819049190501905119052190531905419055190561905719058190591906019061190621906319064190651906619067190681906919070190711907219073190741907519076190771907819079190801908119082190831908419085190861908719088190891909019091190921909319094190951909619097190981909919100191011910219103191041910519106191071910819109191101911119112191131911419115191161911719118191191912019121191221912319124191251912619127191281912919130191311913219133191341913519136191371913819139191401914119142191431914419145191461914719148191491915019151191521915319154191551915619157191581915919160191611916219163191641916519166191671916819169191701917119172191731917419175191761917719178191791918019181191821918319184191851918619187191881918919190191911919219193191941919519196191971919819199192001920119202192031920419205192061920719208192091921019211192121921319214192151921619217192181921919220192211922219223192241922519226192271922819229192301923119232192331923419235192361923719238192391924019241192421924319244192451924619247192481924919250192511925219253192541925519256192571925819259192601926119262192631926419265192661926719268192691927019271192721927319274192751927619277192781927919280192811928219283192841928
519286192871928819289192901929119292192931929419295192961929719298192991930019301193021930319304193051930619307193081930919310193111931219313193141931519316193171931819319193201932119322193231932419325193261932719328193291933019331193321933319334193351933619337193381933919340193411934219343193441934519346193471934819349193501935119352193531935419355193561935719358193591936019361193621936319364193651936619367193681936919370193711937219373193741937519376193771937819379193801938119382193831938419385193861938719388193891939019391193921939319394193951939619397193981939919400194011940219403194041940519406194071940819409194101941119412194131941419415194161941719418194191942019421194221942319424194251942619427194281942919430194311943219433194341943519436194371943819439194401944119442194431944419445194461944719448194491945019451194521945319454194551945619457194581945919460194611946219463194641946519466194671946819469194701947119472194731947419475194761947719478194791948019481194821948319484194851948619487194881948919490194911949219493194941949519496194971949819499195001950119502195031950419505195061950719508195091951019511195121951319514195151951619517195181951919520195211952219523195241952519526195271952819529195301953119532195331953419535195361953719538195391954019541195421954319544195451954619547195481954919550195511955219553195541955519556195571955819559195601956119562195631956419565195661956719568195691957019571195721957319574195751957619577195781957919580195811958219583195841958519586195871958819589195901959119592195931959419595195961959719598195991960019601196021960319604196051960619607196081960919610196111961219613196141961519616196171961819619196201962119622196231962419625196261962719628196291963019631196321963319634196351963619637196381963919640196411964219643196441964519646196471964819649196501965119652196531965419655196561965719658196591966019661196621966319664196651966619667196681966919670196711967219673196741967519676196771967819679196801968119682196831968419685196861968719688196891969019691196921969319694196951969619697196981969919700197011970219703197041970519706197071970819709197101971119712197131971419715197161971719718197191972019721197221972319724197251972619727197281972919730197311973219733197341973519736197371973819739197401974119742197431974419745197461974719748197491975019751197521975319754197551975619757197581975919760197611976219763197641976519766197671976819769197701977119772197731977419775197761977719778197791978019781197821978319784197851978619787197881978919790197911979219793197941979519796197971979819799198001980119802198031980419805198061980719808198091981019811198121981319814198151981619817198181981919820198211982219823198241982519826198271982819829198301983119832198331983419835198361983719838198391984019841198421984319844198451984619847198481984919850198511985219853198541985519856198571985819859198601986119862198631986419865198661986719868198691987019871198721987319874198751987619877198781987919880198811988219883198841988519886198871988819889198901989119892198931989419895198961989719898198991990019901199021990319904199051990619907199081990919910199111991219913199141991519916199171991819919199201992119922199231992419925199261992719928199291993019931199321993319934199351993619937199381993919940199411994219943199441994519946199471994819949199501995119952199531995419955199561995719958199591996019961199621996319964199651996619967199681996919970199711997219973199741997519976199771997819979199801998119982199831998419985199861998719988199891999019991199921999319994199951999
619997199981999920000200012000220003200042000520006200072000820009200102001120012200132001420015200162001720018200192002020021200222002320024200252002620027200282002920030200312003220033200342003520036200372003820039200402004120042200432004420045200462004720048200492005020051200522005320054200552005620057200582005920060200612006220063200642006520066200672006820069200702007120072200732007420075200762007720078200792008020081200822008320084200852008620087200882008920090200912009220093200942009520096200972009820099201002010120102201032010420105201062010720108201092011020111201122011320114201152011620117201182011920120201212012220123201242012520126201272012820129201302013120132201332013420135201362013720138201392014020141201422014320144201452014620147201482014920150201512015220153201542015520156201572015820159201602016120162201632016420165201662016720168201692017020171201722017320174201752017620177201782017920180201812018220183201842018520186201872018820189201902019120192201932019420195201962019720198201992020020201202022020320204202052020620207202082020920210202112021220213202142021520216202172021820219202202022120222202232022420225202262022720228202292023020231202322023320234202352023620237202382023920240202412024220243202442024520246202472024820249202502025120252202532025420255202562025720258202592026020261202622026320264202652026620267202682026920270202712027220273202742027520276202772027820279202802028120282202832028420285202862028720288202892029020291202922029320294202952029620297202982029920300203012030220303203042030520306203072030820309203102031120312203132031420315203162031720318203192032020321203222032320324203252032620327203282032920330203312033220333203342033520336203372033820339203402034120342203432034420345203462034720348203492035020351203522035320354203552035620357203582035920360203612036220363203642036520366203672036820369203702037120372203732037420375203762037720378203792038020381203822038320384203852038620387203882038920390203912039220393203942039520396203972039820399204002040120402204032040420405204062040720408204092041020411204122041320414204152041620417204182041920420204212042220423204242042520426204272042820429204302043120432204332043420435204362043720438204392044020441204422044320444204452044620447204482044920450204512045220453204542045520456204572045820459204602046120462204632046420465204662046720468204692047020471204722047320474204752047620477204782047920480204812048220483204842048520486204872048820489204902049120492204932049420495204962049720498204992050020501205022050320504205052050620507205082050920510205112051220513205142051520516205172051820519205202052120522205232052420525205262052720528205292053020531205322053320534205352053620537205382053920540205412054220543205442054520546205472054820549205502055120552205532055420555205562055720558205592056020561205622056320564205652056620567205682056920570205712057220573205742057520576205772057820579205802058120582205832058420585205862058720588205892059020591205922059320594205952059620597205982059920600206012060220603206042060520606206072060820609206102061120612206132061420615206162061720618206192062020621206222062320624206252062620627206282062920630206312063220633206342063520636206372063820639206402064120642206432064420645206462064720648206492065020651206522065320654206552065620657206582065920660206612066220663206642066520666206672066820669206702067120672206732067420675206762067720678206792068020681206822068320684206852068620687206882068920690206912069220693206942069520696206972069820699207002070120702207032070420705207062070
720708207092071020711207122071320714207152071620717207182071920720207212072220723207242072520726207272072820729207302073120732207332073420735207362073720738207392074020741207422074320744207452074620747207482074920750207512075220753207542075520756207572075820759207602076120762207632076420765207662076720768207692077020771207722077320774207752077620777207782077920780207812078220783207842078520786207872078820789207902079120792207932079420795207962079720798207992080020801208022080320804208052080620807208082080920810208112081220813208142081520816208172081820819208202082120822208232082420825208262082720828208292083020831208322083320834208352083620837208382083920840208412084220843208442084520846208472084820849208502085120852208532085420855208562085720858208592086020861208622086320864208652086620867208682086920870208712087220873208742087520876208772087820879208802088120882208832088420885208862088720888208892089020891208922089320894208952089620897208982089920900209012090220903209042090520906209072090820909209102091120912209132091420915209162091720918209192092020921209222092320924209252092620927209282092920930209312093220933209342093520936209372093820939209402094120942209432094420945209462094720948209492095020951209522095320954209552095620957209582095920960209612096220963209642096520966209672096820969209702097120972209732097420975209762097720978209792098020981209822098320984209852098620987209882098920990209912099220993209942099520996209972099820999210002100121002210032100421005210062100721008210092101021011210122101321014210152101621017210182101921020210212102221023210242102521026210272102821029210302103121032210332103421035210362103721038210392104021041210422104321044210452104621047210482104921050210512105221053210542105521056210572105821059210602106121062210632106421065210662106721068210692107021071210722107321074210752107621077210782107921080210812108221083210842108521086210872108821089210902109121092210932109421095210962109721098210992110021101211022110321104211052110621107211082110921110211112111221113211142111521116211172111821119211202112121122211232112421125211262112721128211292113021131211322113321134211352113621137211382113921140211412114221143211442114521146211472114821149211502115121152211532115421155211562115721158211592116021161211622116321164211652116621167211682116921170211712117221173211742117521176211772117821179211802118121182211832118421185211862118721188211892119021191211922119321194211952119621197211982119921200212012120221203212042120521206212072120821209212102121121212212132121421215212162121721218212192122021221212222122321224212252122621227212282122921230212312123221233212342123521236212372123821239212402124121242212432124421245212462124721248212492125021251212522125321254212552125621257212582125921260212612126221263212642126521266212672126821269212702127121272212732127421275212762127721278212792128021281212822128321284212852128621287212882128921290212912129221293212942129521296212972129821299213002130121302213032130421305213062130721308213092131021311213122131321314213152131621317213182131921320213212132221323213242132521326213272132821329213302133121332213332133421335213362133721338213392134021341213422134321344213452134621347213482134921350213512135221353213542135521356213572135821359213602136121362213632136421365213662136721368213692137021371213722137321374213752137621377213782137921380213812138221383213842138521386213872138821389213902139121392213932139421395213962139721398213992140021401214022140321404214052140621407214082140921410214112141221413214142141521416214172141
821419214202142121422214232142421425214262142721428214292143021431214322143321434214352143621437214382143921440214412144221443214442144521446214472144821449214502145121452214532145421455214562145721458214592146021461214622146321464214652146621467214682146921470214712147221473214742147521476214772147821479214802148121482214832148421485214862148721488214892149021491214922149321494214952149621497214982149921500215012150221503215042150521506215072150821509215102151121512215132151421515215162151721518215192152021521215222152321524215252152621527215282152921530215312153221533215342153521536215372153821539215402154121542215432154421545215462154721548215492155021551215522155321554215552155621557215582155921560215612156221563215642156521566215672156821569215702157121572215732157421575215762157721578215792158021581215822158321584215852158621587215882158921590215912159221593215942159521596215972159821599216002160121602216032160421605216062160721608216092161021611216122161321614216152161621617216182161921620216212162221623216242162521626216272162821629216302163121632216332163421635216362163721638216392164021641216422164321644216452164621647216482164921650216512165221653216542165521656216572165821659216602166121662216632166421665216662166721668216692167021671216722167321674216752167621677216782167921680216812168221683216842168521686216872168821689216902169121692216932169421695216962169721698216992170021701217022170321704217052170621707217082170921710217112171221713217142171521716217172171821719217202172121722217232172421725217262172721728217292173021731217322173321734217352173621737217382173921740217412174221743217442174521746217472174821749217502175121752217532175421755217562175721758217592176021761217622176321764217652176621767217682176921770217712177221773217742177521776217772177821779217802178121782217832178421785217862178721788217892179021791217922179321794217952179621797217982179921800218012180221803218042180521806218072180821809218102181121812218132181421815218162181721818218192182021821218222182321824218252182621827218282182921830218312183221833218342183521836218372183821839218402184121842218432184421845218462184721848218492185021851218522185321854218552185621857218582185921860218612186221863218642186521866218672186821869218702187121872218732187421875218762187721878218792188021881218822188321884218852188621887218882188921890218912189221893218942189521896218972189821899219002190121902219032190421905219062190721908219092191021911219122191321914219152191621917219182191921920219212192221923219242192521926219272192821929219302193121932219332193421935219362193721938219392194021941219422194321944219452194621947219482194921950219512195221953219542195521956219572195821959219602196121962219632196421965219662196721968219692197021971219722197321974219752197621977219782197921980219812198221983219842198521986219872198821989219902199121992219932199421995219962199721998219992200022001220022200322004220052200622007220082200922010220112201222013220142201522016220172201822019220202202122022220232202422025220262202722028220292203022031220322203322034220352203622037220382203922040220412204222043220442204522046220472204822049220502205122052220532205422055220562205722058220592206022061220622206322064220652206622067220682206922070220712207222073220742207522076220772207822079220802208122082220832208422085220862208722088220892209022091220922209322094220952209622097220982209922100221012210222103221042210522106221072210822109221102211122112221132211422115221162211722118221192212022121221222212322124221252212622127221282212
922130221312213222133221342213522136221372213822139221402214122142221432214422145221462214722148221492215022151221522215322154221552215622157221582215922160221612216222163221642216522166221672216822169221702217122172221732217422175221762217722178221792218022181221822218322184221852218622187221882218922190221912219222193221942219522196221972219822199222002220122202222032220422205222062220722208222092221022211222122221322214222152221622217222182221922220222212222222223222242222522226222272222822229222302223122232222332223422235222362223722238222392224022241222422224322244222452224622247222482224922250222512225222253222542225522256222572225822259222602226122262222632226422265222662226722268222692227022271222722227322274222752227622277222782227922280222812228222283222842228522286222872228822289222902229122292222932229422295222962229722298222992230022301223022230322304223052230622307223082230922310223112231222313223142231522316223172231822319223202232122322223232232422325223262232722328223292233022331223322233322334223352233622337223382233922340223412234222343223442234522346223472234822349223502235122352223532235422355223562235722358223592236022361223622236322364223652236622367223682236922370223712237222373223742237522376223772237822379223802238122382223832238422385223862238722388223892239022391223922239322394223952239622397223982239922400224012240222403224042240522406224072240822409224102241122412224132241422415224162241722418224192242022421224222242322424224252242622427224282242922430224312243222433224342243522436224372243822439224402244122442224432244422445224462244722448224492245022451224522245322454224552245622457224582245922460224612246222463224642246522466224672246822469224702247122472224732247422475224762247722478224792248022481224822248322484224852248622487224882248922490224912249222493224942249522496224972249822499225002250122502225032250422505225062250722508225092251022511225122251322514225152251622517225182251922520225212252222523225242252522526225272252822529225302253122532225332253422535225362253722538225392254022541225422254322544225452254622547225482254922550225512255222553225542255522556225572255822559225602256122562225632256422565225662256722568225692257022571225722257322574225752257622577225782257922580225812258222583225842258522586225872258822589225902259122592225932259422595225962259722598225992260022601226022260322604226052260622607226082260922610226112261222613226142261522616226172261822619226202262122622226232262422625226262262722628226292263022631226322263322634226352263622637226382263922640226412264222643226442264522646226472264822649226502265122652226532265422655226562265722658226592266022661226622266322664226652266622667226682266922670226712267222673226742267522676226772267822679226802268122682226832268422685226862268722688226892269022691226922269322694226952269622697226982269922700227012270222703227042270522706227072270822709227102271122712227132271422715227162271722718227192272022721227222272322724227252272622727227282272922730227312273222733227342273522736227372273822739227402274122742227432274422745227462274722748227492275022751227522275322754227552275622757227582275922760227612276222763227642276522766227672276822769227702277122772227732277422775227762277722778227792278022781227822278322784227852278622787227882278922790227912279222793227942279522796227972279822799228002280122802228032280422805228062280722808228092281022811228122281322814228152281622817228182281922820228212282222823228242282522826228272282822829228302283122832228332283422835228362283722838228392284
022841228422284322844228452284622847228482284922850228512285222853228542285522856228572285822859228602286122862228632286422865228662286722868228692287022871228722287322874228752287622877228782287922880228812288222883228842288522886228872288822889228902289122892228932289422895228962289722898228992290022901229022290322904229052290622907229082290922910229112291222913229142291522916229172291822919229202292122922229232292422925229262292722928229292293022931229322293322934229352293622937229382293922940229412294222943229442294522946229472294822949229502295122952229532295422955229562295722958229592296022961229622296322964229652296622967229682296922970229712297222973229742297522976229772297822979229802298122982229832298422985229862298722988229892299022991229922299322994229952299622997229982299923000230012300223003230042300523006230072300823009230102301123012230132301423015230162301723018230192302023021230222302323024230252302623027230282302923030230312303223033230342303523036230372303823039230402304123042230432304423045230462304723048230492305023051230522305323054230552305623057230582305923060230612306223063230642306523066230672306823069230702307123072230732307423075230762307723078230792308023081230822308323084230852308623087230882308923090230912309223093230942309523096230972309823099231002310123102231032310423105231062310723108231092311023111231122311323114231152311623117231182311923120231212312223123231242312523126231272312823129231302313123132231332313423135231362313723138231392314023141231422314323144231452314623147231482314923150231512315223153231542315523156231572315823159231602316123162231632316423165231662316723168231692317023171231722317323174231752317623177231782317923180231812318223183231842318523186231872318823189231902319123192231932319423195231962319723198231992320023201232022320323204232052320623207232082320923210232112321223213232142321523216232172321823219232202322123222232232322423225232262322723228232292323023231232322323323234232352323623237232382323923240232412324223243232442324523246232472324823249232502325123252232532325423255232562325723258232592326023261232622326323264232652326623267232682326923270232712327223273232742327523276232772327823279232802328123282232832328423285232862328723288232892329023291232922329323294232952329623297232982329923300233012330223303233042330523306233072330823309233102331123312233132331423315233162331723318233192332023321233222332323324233252332623327233282332923330233312333223333233342333523336233372333823339233402334123342233432334423345233462334723348233492335023351233522335323354233552335623357233582335923360233612336223363233642336523366233672336823369233702337123372233732337423375233762337723378233792338023381233822338323384233852338623387233882338923390233912339223393233942339523396233972339823399234002340123402234032340423405234062340723408234092341023411234122341323414234152341623417234182341923420234212342223423234242342523426234272342823429234302343123432234332343423435234362343723438234392344023441234422344323444234452344623447234482344923450234512345223453234542345523456234572345823459234602346123462234632346423465234662346723468234692347023471234722347323474234752347623477234782347923480234812348223483234842348523486234872348823489234902349123492234932349423495234962349723498234992350023501235022350323504235052350623507235082350923510235112351223513235142351523516235172351823519235202352123522235232352423525235262352723528235292353023531235322353323534235352353623537235382353923540235412354223543235442354523546235472354823549235502355
123552235532355423555235562355723558235592356023561235622356323564235652356623567235682356923570235712357223573235742357523576235772357823579235802358123582235832358423585235862358723588235892359023591235922359323594235952359623597235982359923600236012360223603236042360523606236072360823609236102361123612236132361423615236162361723618236192362023621236222362323624236252362623627236282362923630236312363223633236342363523636236372363823639236402364123642236432364423645236462364723648236492365023651236522365323654236552365623657236582365923660236612366223663236642366523666236672366823669236702367123672236732367423675236762367723678236792368023681236822368323684236852368623687236882368923690236912369223693236942369523696236972369823699237002370123702237032370423705237062370723708237092371023711237122371323714237152371623717237182371923720237212372223723237242372523726237272372823729237302373123732237332373423735237362373723738237392374023741237422374323744237452374623747237482374923750237512375223753237542375523756237572375823759237602376123762237632376423765237662376723768237692377023771237722377323774237752377623777237782377923780237812378223783237842378523786237872378823789237902379123792237932379423795237962379723798237992380023801238022380323804238052380623807238082380923810238112381223813238142381523816238172381823819238202382123822238232382423825238262382723828238292383023831238322383323834238352383623837238382383923840238412384223843238442384523846238472384823849238502385123852238532385423855238562385723858238592386023861238622386323864238652386623867238682386923870238712387223873238742387523876238772387823879238802388123882238832388423885238862388723888238892389023891238922389323894238952389623897238982389923900239012390223903239042390523906239072390823909239102391123912239132391423915239162391723918239192392023921239222392323924239252392623927239282392923930239312393223933239342393523936239372393823939239402394123942239432394423945239462394723948239492395023951239522395323954239552395623957239582395923960239612396223963239642396523966239672396823969239702397123972239732397423975239762397723978239792398023981239822398323984239852398623987239882398923990239912399223993239942399523996239972399823999240002400124002240032400424005240062400724008240092401024011240122401324014240152401624017240182401924020240212402224023240242402524026240272402824029240302403124032240332403424035240362403724038240392404024041240422404324044240452404624047240482404924050240512405224053240542405524056240572405824059240602406124062240632406424065240662406724068240692407024071240722407324074240752407624077240782407924080240812408224083240842408524086240872408824089240902409124092240932409424095240962409724098240992410024101241022410324104241052410624107241082410924110241112411224113241142411524116241172411824119241202412124122241232412424125241262412724128241292413024131241322413324134241352413624137241382413924140241412414224143241442414524146241472414824149241502415124152241532415424155241562415724158241592416024161241622416324164241652416624167241682416924170241712417224173241742417524176241772417824179241802418124182241832418424185241862418724188241892419024191241922419324194241952419624197241982419924200242012420224203242042420524206242072420824209242102421124212242132421424215242162421724218242192422024221242222422324224242252422624227242282422924230242312423224233242342423524236242372423824239242402424124242242432424424245242462424724248242492425024251242522425324254242552425624257242582425924260242612426
- # @generated from torch/_C/_VariableFunctions.pyi.in
- # mypy: disable-error-code="type-arg"
- # mypy: allow-untyped-defs
- import builtins
- from typing import (
- Any,
- Callable,
- ContextManager,
- Iterator,
- List,
- Literal,
- NamedTuple,
- Optional,
- overload,
- Sequence,
- Tuple,
- TypeVar,
- Union,
- )
- import torch
- from torch import contiguous_format, Generator, inf, memory_format, strided, SymInt, Tensor
- from torch.types import (
- _bool,
- _complex,
- _device,
- _dtype,
- _float,
- _int,
- _layout,
- _qscheme,
- _size,
- Device,
- Number,
- )
- from torch._prims_common import DeviceLikeType
- @overload
- def __and__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __or__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
- @overload
- def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
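- # Example (illustrative; these module-level dunders back the bitwise and shift
- # operators on Tensor, so the operator spelling below is the usual entry point):
- #     >>> a = torch.tensor([12]); b = torch.tensor([10])
- #     >>> a & b     # dispatches to __and__:    tensor([8])
- #     >>> a ^ b     # dispatches to __xor__:    tensor([6])
- #     >>> a << 1    # dispatches to __lshift__: tensor([24])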
- def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
- def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ...
- def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
- @overload
- def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
- @overload
- def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
- @overload
- def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
- def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ...
- def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
- def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
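- # Example (minimal sketch of how these two ops serve GradScaler's unscale and
- # update steps; calling them directly, and the `model` below, are illustrative
- # assumptions rather than supported usage):
- #     >>> grads = [p.grad for p in model.parameters()]
- #     >>> found_inf = torch.zeros(1, device=grads[0].device)
- #     >>> inv_scale = torch.full((1,), 1.0 / 65536.0, device=grads[0].device)
- #     >>> torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
- #     >>> bool(found_inf.item())  # True if any unscaled gradient was inf/NaN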
- @overload
- def _assert_async(input: Tensor) -> None:
- r"""
- _assert_async(tensor) -> void
-
- Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
- this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
- CUDA tensors, we DO NOT synchronize, and you may only find out that the assertion
- failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
- testing invariants in CUDA tensors without giving up performance. This function
- is NOT intended for regular error checking, as it will trash your CUDA
- context if the assert fails (forcing you to restart your PyTorch process).
-
- Args:
- tensor (Tensor): a one-element tensor to test for being nonzero. Zero
- elements (including False for boolean tensors) cause an assertion failure
- to be raised.
- """
- ...
- @overload
- def _assert_async(input: Tensor, assert_msg: str) -> None:
- r"""
- _assert_async(tensor, assert_msg) -> void
-
- Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
- this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
- CUDA tensors, we DO NOT synchronize, and you may only find out that the assertion
- failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
- testing invariants in CUDA tensors without giving up performance. This function
- is NOT intended for regular error checking, as it will trash your CUDA
- context if the assert fails (forcing you to restart your PyTorch process).
-
- Args:
- tensor (Tensor): a one-element tensor to test for being nonzero. Zero
- elements (including False for boolean tensors) cause an assertion failure
- to be raised.
- assert_msg (str): the message to include if the assertion fails.
- """
- ...
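- # Example (illustrative sketch making the CPU/CUDA semantics above concrete;
- # the CUDA line assumes a GPU is available):
- #     >>> t = torch.tensor([1.0])
- #     >>> torch._assert_async(t)                       # passes: t is nonzero
- #     >>> torch._assert_async(t.cuda(), "t is zero")   # enqueued, not synchronized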
- def _assert_scalar(self: Union[Number, _complex], assert_msg: str) -> None: ...
- def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ...
- def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
- def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ...
- def _chunk_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int, num_chunks: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _coalesce(input: Tensor) -> Tensor: ...
- def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _conj(input: Tensor) -> Tensor: ...
- def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _conj_physical(input: Tensor) -> Tensor: ...
- def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
- def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ...
- def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ...
- @overload
- def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
- @overload
- def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
- def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ...
- def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
- def _cslt_compress(input: Tensor) -> Tensor: ...
- def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False, alg_id: _int = 0) -> Tensor: ...
- def _cslt_sparse_mm_search(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> _int: ...
- @overload
- def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
- def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
- def _cufft_clear_plan_cache(device_index: _int) -> None: ...
- def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
- def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
- def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
- def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
- def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
- def _debug_has_internal_overlap(input: Tensor) -> _int: ...
- def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
- def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
- def _disable_functionalization(): ...
- @overload
- def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- @overload
- def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- @overload
- def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- @overload
- def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- @overload
- def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- @overload
- def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- def _enable_functionalization(*, reapply_views: _bool = False): ...
- def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
- def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
- def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ...
- def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
- def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ...
- def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ...
- def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ...
- def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_abs(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.abs` to each Tensor of the input list.
- """
- ...
- def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_abs_(self: List[Tensor]) -> None
-
- Apply :func:`torch.abs` to each Tensor of the input list.
- """
- ...
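- # Example (illustrative sketch; the _foreach_* ops are private bindings used by
- # the optimizers to cover a whole tensor list with far fewer kernel launches
- # than a Python loop):
- #     >>> xs = [torch.tensor([-1.0, 2.0]), torch.tensor([-3.0])]
- #     >>> torch._foreach_abs(xs)     # -> (tensor([1., 2.]), tensor([3.]))
- #     >>> torch._foreach_abs_(xs)    # in-place variant, returns None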
- def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_acos(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.acos` to each Tensor of the input list.
- """
- ...
- def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_acos_(self: List[Tensor]) -> None
-
- Apply :func:`torch.acos` to each Tensor of the input list.
- """
- ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ...
- @overload
- def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
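- # Example (illustrative sketch covering the four overload shapes above):
- #     >>> xs = [torch.ones(2), torch.ones(3)]
- #     >>> ys = [torch.full((2,), 2.0), torch.full((3,), 2.0)]
- #     >>> torch._foreach_add(xs, 1.0)             # one scalar for every tensor
- #     >>> torch._foreach_add(xs, [1.0, 2.0])      # one scalar per tensor
- #     >>> torch._foreach_add(xs, ys, alpha=0.5)   # xs[i] + 0.5 * ys[i]
- #     >>> torch._foreach_add_(xs, ys)             # in place, returns None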
- @overload
- def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
- @overload
- def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
- @overload
- def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
- @overload
- def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ...
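- # Example (illustrative sketch; this fused form is what Adam-style optimizers
- # use for p += value * t1 * t2 and p += value * t1 / t2 across parameter lists):
- #     >>> ps = [torch.zeros(2)]; t1 = [torch.ones(2)]; t2 = [torch.full((2,), 2.0)]
- #     >>> torch._foreach_addcmul_(ps, t1, t2, 0.1)    # ps[i] += 0.1 * t1[i] * t2[i]
- #     >>> torch._foreach_addcdiv_(ps, t1, t2, [0.1])  # per-tensor scalars variant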
- def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_asin(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.asin` to each Tensor of the input list.
- """
- ...
- def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_asin_(self: List[Tensor]) -> None
-
- Apply :func:`torch.asin` to each Tensor of the input list.
- """
- ...
- def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_atan(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.atan` to each Tensor of the input list.
- """
- ...
- def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_atan_(self: List[Tensor]) -> None
-
- Apply :func:`torch.atan` to each Tensor of the input list.
- """
- ...
- def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_ceil(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.ceil` to each Tensor of the input list.
- """
- ...
- def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_ceil_(self: List[Tensor]) -> None
-
- Apply :func:`torch.ceil` to each Tensor of the input list.
- """
- ...
- @overload
- def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
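- # Example (illustrative sketch; elementwise clamping across the whole list):
- #     >>> xs = [torch.tensor([-2.0, 0.5, 3.0])]
- #     >>> torch._foreach_clamp_min(xs, 0.0)    # -> (tensor([0.0, 0.5, 3.0]),)
- #     >>> torch._foreach_clamp_max_(xs, 1.0)   # in place: elementwise min(x, 1)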
- def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ...
- def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_cos(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.cos` to each Tensor of the input list.
- """
- ...
- def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_cos_(self: List[Tensor]) -> None
-
- Apply :func:`torch.cos` to each Tensor of the input list.
- """
- ...
- def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_cosh(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.cosh` to each Tensor of the input list.
- """
- ...
- def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_cosh_(self: List[Tensor]) -> None
-
- Apply :func:`torch.cosh` to each Tensor of the input list.
- """
- ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
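- # Example (illustrative sketch; the single-Tensor overload divides every list
- # element by one scalar tensor, a shape that suits global gradient scaling;
- # that use is stated as context, not taken from this stub):
- #     >>> xs = [torch.ones(2), torch.ones(3)]
- #     >>> torch._foreach_div_(xs, torch.tensor(4.0))   # xs[i] /= 4, in place
- #     >>> torch._foreach_div(xs, [2.0, 4.0])           # one divisor per tensor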
- def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_erf(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.erf` to each Tensor of the input list.
- """
- ...
- def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_erf_(self: List[Tensor]) -> None
-
- Apply :func:`torch.erf` to each Tensor of the input list.
- """
- ...
- def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_erfc(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.erfc` to each Tensor of the input list.
- """
- ...
- def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_erfc_(self: List[Tensor]) -> None
-
- Apply :func:`torch.erfc` to each Tensor of the input list.
- """
- ...
- def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_exp(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.exp` to each Tensor of the input list.
- """
- ...
- def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_exp_(self: List[Tensor]) -> None
-
- Apply :func:`torch.exp` to each Tensor of the input list.
- """
- ...
- def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_expm1(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.expm1` to each Tensor of the input list.
- """
- ...
- def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_expm1_(self: List[Tensor]) -> None
-
- Apply :func:`torch.expm1` to each Tensor of the input list.
- """
- ...
- def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_floor(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.floor` to each Tensor of the input list.
- """
- ...
- def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_floor_(self: List[Tensor]) -> None
-
- Apply :func:`torch.floor` to each Tensor of the input list.
- """
- ...
- def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_frac(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.frac` to each Tensor of the input list.
- """
- ...
- def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_frac_(self: List[Tensor]) -> None
-
- Apply :func:`torch.frac` to each Tensor of the input list.
- """
- ...
- @overload
- def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
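- # Example (illustrative sketch; linear interpolation across two tensor lists):
- #     >>> a = [torch.zeros(3)]; b = [torch.ones(3)]
- #     >>> torch._foreach_lerp(a, b, 0.5)     # halfway: a[i] + 0.5 * (b[i] - a[i])
- #     >>> w = [torch.full((3,), 0.25)]
- #     >>> torch._foreach_lerp_(a, b, w)      # elementwise weights, in place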
- def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_lgamma(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.lgamma` to each Tensor of the input list.
- """
- ...
- def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_lgamma_(self: List[Tensor]) -> None
-
- Apply :func:`torch.lgamma` to each Tensor of the input list.
- """
- ...
- def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_log(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.log` to each Tensor of the input list.
- """
- ...
- def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_log10(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.log10` to each Tensor of the input list.
- """
- ...
- def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_log10_(self: List[Tensor]) -> None
-
- Apply :func:`torch.log10` to each Tensor of the input list.
- """
- ...
- def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_log1p(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.log1p` to each Tensor of the input list.
- """
- ...
- def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_log1p_(self: List[Tensor]) -> None
-
- Apply :func:`torch.log1p` to each Tensor of the input list.
- """
- ...
- def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_log2(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.log2` to each Tensor of the input list.
- """
- ...
- def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_log2_(self: List[Tensor]) -> None
-
- Apply :func:`torch.log2` to each Tensor of the input list.
- """
- ...
- def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_log_(self: List[Tensor]) -> None
-
- Apply :func:`torch.log` to each Tensor of the input list.
- """
- ...
- def _foreach_max(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_neg(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.neg` to each Tensor of the input list.
- """
- ...
- def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_neg_(self: List[Tensor]) -> None
-
- Apply :func:`torch.neg` to each Tensor of the input list.
- """
- ...
- def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2, dtype: Optional[_dtype] = None) -> Tuple[Tensor, ...]: ...
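- # Example (illustrative sketch; one norm per tensor in a single call, the kind
- # of primitive torch.nn.utils.clip_grad_norm_ builds on; that connection is
- # background knowledge, not part of this stub):
- #     >>> grads = [torch.randn(3), torch.randn(5)]
- #     >>> norms = torch._foreach_norm(grads, 2)    # one 0-dim L2 norm per tensor
- #     >>> total = torch.linalg.vector_norm(torch.stack(norms), 2)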
- @overload
- def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ...
- @overload
- def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
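- # Example (illustrative sketch; note the last overload above, which takes a
- # scalar base and a list of exponents):
- #     >>> xs = [torch.tensor([1.0, 2.0, 3.0])]
- #     >>> torch._foreach_pow(xs, 2.0)      # xs[i] ** 2
- #     >>> torch._foreach_pow(2.0, xs)      # 2 ** xs[i]
- #     >>> torch._foreach_pow_(xs, [3.0])   # in place, one exponent per tensor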
- def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_reciprocal(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.reciprocal` to each Tensor of the input list.
- """
- ...
- def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_reciprocal_(self: List[Tensor]) -> None
-
- Apply :func:`torch.reciprocal` to each Tensor of the input list.
- """
- ...
- def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_round(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.round` to each Tensor of the input list.
- """
- ...
- def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_round_(self: List[Tensor]) -> None
-
- Apply :func:`torch.round` to each Tensor of the input list.
- """
- ...
- def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_sigmoid(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.sigmoid` to each Tensor of the input list.
- """
- ...
- def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_sigmoid_(self: List[Tensor]) -> None
-
- Apply :func:`torch.sigmoid` to each Tensor of the input list.
- """
- ...
- def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
- def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_sin(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.sin` to each Tensor of the input list.
- """
- ...
- def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_sin_(self: List[Tensor]) -> None
-
- Apply :func:`torch.sin` to each Tensor of the input list.
- """
- ...
- def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_sinh(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.sinh` to each Tensor of the input list.
- """
- ...
- def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_sinh_(self: List[Tensor]) -> None
-
- Apply :func:`torch.sinh` to each Tensor of the input list.
- """
- ...
- def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_sqrt(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.sqrt` to each Tensor of the input list.
- """
- ...
- def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_sqrt_(self: List[Tensor]) -> None
-
- Apply :func:`torch.sqrt` to each Tensor of the input list.
- """
- ...
- @overload
- def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ...
- @overload
- def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ...
- @overload
- def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ...
- @overload
- def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ...
- def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_tan(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.tan` to each Tensor of the input list.
- """
- ...
- def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_tan_(self: List[Tensor]) -> None
-
- Apply :func:`torch.tan` to each Tensor of the input list.
- """
- ...
- def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_tanh(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.tanh` to each Tensor of the input list.
- """
- ...
- def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_tanh_(self: List[Tensor]) -> None
-
- Apply :func:`torch.tanh` to each Tensor of the input list.
- """
- ...
- def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- _foreach_trunc(self: List[Tensor]) -> List[Tensor]
-
- Apply :func:`torch.trunc` to each Tensor of the input list.
- """
- ...
- def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_trunc_(self: List[Tensor]) -> None
-
- Apply :func:`torch.trunc` to each Tensor of the input list.
- """
- ...
- def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None:
- r"""
- _foreach_zero_(self: List[Tensor]) -> None
-
- Apply :meth:`torch.Tensor.zero_` to each Tensor of the input list.
- """
- ...
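- # Example (illustrative sketch; the foreach analogue of calling Tensor.zero_
- # in a loop, e.g. to clear a list of gradient buffers in bulk):
- #     >>> bufs = [torch.ones(2), torch.ones(3)]
- #     >>> torch._foreach_zero_(bufs)   # every tensor is now all zeros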
- def _from_functional_tensor(t: Tensor) -> Tensor: ...
- def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ...
- def _functional_assert_scalar(self: Union[Number, _complex], assert_msg: str, dep_token: Tensor) -> Tensor: ...
- def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
- def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ...
- def _functionalize_apply_view_metas(tensor: Tensor, base: Tensor) -> Tensor: ...
- def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ...
- def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ...
- def _functionalize_commit_update(t: Tensor) -> None: ...
- def _functionalize_has_metadata_mutation(tensor: Tensor) -> _bool: ...
- def _functionalize_is_symbolic(tensor: Tensor) -> _bool: ...
- def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ...
- def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ...
- def _functionalize_sync(t: Tensor) -> None: ...
- def _functionalize_was_inductor_storage_resized(t: Tensor) -> _bool: ...
- def _functionalize_was_storage_changed(tensor: Tensor) -> _bool: ...
- def _fused_adagrad_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], state_sums: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, lr_decay: _float, weight_decay: _float, eps: _float, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
- @overload
- def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
- @overload
- def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
- @overload
- def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
- @overload
- def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
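- # Example (minimal sketch of one fused Adam step; in practice this path is
- # reached through torch.optim.Adam(..., fused=True), and every hyperparameter
- # value below is an illustrative assumption):
- #     >>> params = [torch.randn(4)]; grads = [torch.randn(4)]
- #     >>> exp_avgs = [torch.zeros(4)]; exp_avg_sqs = [torch.zeros(4)]
- #     >>> state_steps = [torch.tensor(1.0)]
- #     >>> torch._fused_adam_(params, grads, exp_avgs, exp_avg_sqs, [], state_steps,
- #     ...                    lr=1e-3, beta1=0.9, beta2=0.999, weight_decay=0.0,
- #     ...                    eps=1e-8, amsgrad=False, maximize=False)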
- def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ...
- def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
- def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ...
- @overload
- def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: Tensor, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
- @overload
- def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: _float, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ...
- def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
- def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tuple[Tensor, ...]: ...
- def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
- def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ...
- def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ...
- def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _is_all_true(input: Tensor) -> Tensor: ...
- def _is_any_true(input: Tensor) -> Tensor: ...
- def _is_functional_tensor(t: Tensor) -> _bool: ...
- def _is_zerotensor(input: Tensor) -> _bool: ...
- def _lazy_clone(input: Tensor) -> Tensor: ...
- def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
- def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ...
- def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ...
- def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ...
- def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ...
- def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ...
- def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ...
- def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
- def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
- def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
- def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
- def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ...
- def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ...
- def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
- def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
- def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
- def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
- @overload
- def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ...
- def _neg_view(input: Tensor) -> Tensor: ...
- def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _nested_compute_contiguous_strides_offsets(nested_size: Tensor) -> Tuple[Tensor, Tensor]: ...
- def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ...
- def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ...
- def _nested_get_jagged_dummy(any: Tensor) -> Tensor: ...
- def _nested_get_lengths(input: Tensor) -> Tensor: ...
- def _nested_get_offsets(input: Tensor) -> Tensor: ...
- def _nested_get_ragged_idx(input: Tensor) -> _int: ...
- def _nested_get_values(input: Tensor) -> Tensor: ...
- def _nested_get_values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ...
- def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ...
- def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ...
- def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ...
- def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ...
- def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _nested_view_from_jagged(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1) -> Tensor: ...
- def _nested_view_from_jagged_copy(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _nnpack_available() -> _bool: ...
- def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
- def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ...
- def _pin_memory(input: Tensor, device: Optional[DeviceLikeType] = None) -> Tensor: ...
- def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ...
- def _print(s: str) -> None: ...
- def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ...
- def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
- def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ...
- def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
- def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ...
- def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
- def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
- def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
- def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ...
- def _scaled_dot_product_cudnn_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_cudnn_attention: ...
- def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ...
- def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ...
- def _scaled_dot_product_flash_attention_for_cpu(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, *, attn_mask: Optional[Tensor] = None, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention_for_cpu: ...
- def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ...
- def _shape_as_tensor(input: Tensor) -> Tensor: ...
- def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
- def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
- def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
- def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
- def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ...
- def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ...
- def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
- def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ...
- def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
- def _sparse_semi_structured_addmm(input: Tensor, mat1: Tensor, mat1_meta: Tensor, mat2: Tensor, *, alpha: Union[Number, _complex] = 1, beta: Union[Number, _complex] = 1, out_dtype: Optional[_dtype] = None) -> Tensor: ...
- def _sparse_semi_structured_apply(input: Tensor, thread_masks: Tensor) -> Tuple[Tensor, Tensor]: ...
- def _sparse_semi_structured_apply_dense(input: Tensor, thread_masks: Tensor) -> Tensor: ...
- def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None, out_dtype: Optional[_dtype] = None) -> Tensor: ...
- def _sparse_semi_structured_mm(mat1: Tensor, mat1_meta: Tensor, mat2: Tensor, *, out_dtype: Optional[_dtype] = None) -> Tensor: ...
- def _sparse_semi_structured_tile(input: Tensor, algorithm: str = "", use_cutlass: _bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
- def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
- @overload
- def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
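- # Illustrative sketch (not part of the generated stub): the four `_sparse_sum`
- # overloads above cover the {no-args, dtype, dim, dim+dtype} combinations, and
- # the public `torch.sparse.sum` wrapper routes here. A full reduction returns
- # a dense 0-dim tensor:
- #   >>> sp = torch.eye(3).to_sparse()
- #   >>> torch._sparse_sum(sp)
- #   tensor(3.)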
- def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
- def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
- def _sync(t: Tensor) -> None: ...
- @overload
- def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ...
- @overload
- def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ...
- def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ...
- def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _test_check_tensor(input: Tensor) -> Tensor: ...
- def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ...
- def _test_parallel_materialize(input: Tensor, num_parallel: _int, skip_first: _bool = False) -> Tensor: ...
- def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ...
- def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ...
- def _to_functional_tensor(t: Tensor) -> Tensor: ...
- def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ...
- def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ...
- def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ...
- def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ...
- def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ...
- def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ...
- def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
- def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ...
- def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ...
- def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
- @overload
- def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ...
- @overload
- def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
- def _use_cudnn_rnn_flatten_weight() -> _bool: ...
- def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ...
- def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ...
- def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ...
- def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ...
- def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ...
- def _weight_int8pack_mm(input: Tensor, mat2: Tensor, scales: Tensor) -> Tensor: ...
- def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ...
- def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ...
- def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- abs(input, *, out=None) -> Tensor
-
- Computes the absolute value of each element in :attr:`input`.
-
- .. math::
- \text{out}_{i} = |\text{input}_{i}|
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.abs(torch.tensor([-1, -2, 3]))
- tensor([ 1, 2, 3])
- """
- ...
- def abs_(input: Tensor) -> Tensor: ...
- def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- absolute(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.abs`
- """
- ...
- def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- acos(input, *, out=None) -> Tensor
-
- Computes the inverse cosine of each element in :attr:`input`.
-
- .. math::
- \text{out}_{i} = \cos^{-1}(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
- >>> torch.acos(a)
- tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
- """
- ...
- def acos_(input: Tensor) -> Tensor: ...
- def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- acosh(input, *, out=None) -> Tensor
-
- Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \cosh^{-1}(\text{input}_{i})
-
- Note:
- The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
- will be mapped to ``NaN``, except for ``+inf``, for which the output is mapped to ``+inf``.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4).uniform_(1, 2)
- >>> a
- tensor([ 1.3192, 1.9915, 1.9674, 1.7151])
- >>> torch.acosh(a)
- tensor([ 0.7791, 1.3120, 1.2979, 1.1341])
- """
- ...
- def acosh_(input: Tensor) -> Tensor: ...
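- # Usage sketch (illustrative, not generated): the trailing-underscore stubs
- # abs_, acos_, and acosh_ are the in-place variants; each mutates `input`
- # and returns it:
- #   >>> t = torch.tensor([-1.0, -2.0])
- #   >>> torch.abs_(t)
- #   tensor([1., 2.])
- #   >>> t  # mutated in place
- #   tensor([1., 2.])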
- def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
- def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
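- # Usage sketch (illustrative, not generated): only the output shapes below are
- # guaranteed; per the stub above, adaptive_max_pool1d also returns the argmax
- # indices alongside the pooled values:
- #   >>> x = torch.randn(1, 3, 8)
- #   >>> torch.adaptive_avg_pool1d(x, 4).shape
- #   torch.Size([1, 3, 4])
- #   >>> out, idx = torch.adaptive_max_pool1d(x, 4)
- #   >>> out.shape == idx.shape == torch.Size([1, 3, 4])
- #   True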
- @overload
- def add(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- add(input, other, *, alpha=1, out=None) -> Tensor
-
- Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to add to :attr:`input`.
-
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- out (Tensor, optional): the output tensor.
-
- Examples::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
- >>> torch.add(a, 20)
- tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
-
- >>> b = torch.randn(4)
- >>> b
- tensor([-0.9732, -0.3497, 0.6245, 0.4022])
- >>> c = torch.randn(4, 1)
- >>> c
- tensor([[ 0.3743],
- [-1.7724],
- [-0.5811],
- [-0.8017]])
- >>> torch.add(b, c, alpha=10)
- tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
- [-18.6971, -18.0736, -17.0994, -17.3216],
- [ -6.7845, -6.1610, -5.1868, -5.4090],
- [ -8.9902, -8.3667, -7.3925, -7.6147]])
- """
- ...
- @overload
- def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- add(input, other, *, alpha=1, out=None) -> Tensor
-
- Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to add to :attr:`input`.
-
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- out (Tensor, optional): the output tensor.
-
- Examples::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
- >>> torch.add(a, 20)
- tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
-
- >>> b = torch.randn(4)
- >>> b
- tensor([-0.9732, -0.3497, 0.6245, 0.4022])
- >>> c = torch.randn(4, 1)
- >>> c
- tensor([[ 0.3743],
- [-1.7724],
- [-0.5811],
- [-0.8017]])
- >>> torch.add(b, c, alpha=10)
- tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
- [-18.6971, -18.0736, -17.0994, -17.3216],
- [ -6.7845, -6.1610, -5.1868, -5.4090],
- [ -8.9902, -8.3667, -7.3925, -7.6147]])
- """
- ...
- @overload
- def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor:
- r"""
- add(input, other, *, alpha=1, out=None) -> Tensor
-
- Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{alpha} \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to add to :attr:`input`.
-
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- out (Tensor, optional): the output tensor.
-
- Examples::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
- >>> torch.add(a, 20)
- tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
-
- >>> b = torch.randn(4)
- >>> b
- tensor([-0.9732, -0.3497, 0.6245, 0.4022])
- >>> c = torch.randn(4, 1)
- >>> c
- tensor([[ 0.3743],
- [-1.7724],
- [-0.5811],
- [-0.8017]])
- >>> torch.add(b, c, alpha=10)
- tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
- [-18.6971, -18.0736, -17.0994, -17.3216],
- [ -6.7845, -6.1610, -5.1868, -5.4090],
- [ -8.9902, -8.3667, -7.3925, -7.6147]])
- """
- ...
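- # Note (illustrative, not generated): the `(self, alpha, other)` overloads
- # above reflect the legacy positional-alpha calling convention; the keyword
- # form is preferred:
- #   >>> torch.add(torch.ones(2), torch.ones(2), alpha=10)
- #   tensor([11., 11.])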
- @overload
- def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor:
- r"""
- addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices stored
- in :attr:`batch1` and :attr:`batch2`,
- with a reduced add step (all matrix multiplications get accumulated
- along the first dimension).
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
- same number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- .. math::
- out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
- must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.addbmm(M, batch1, batch2)
- tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
- [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
- [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
- """
- ...
- @overload
- def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices stored
- in :attr:`batch1` and :attr:`batch2`,
- with a reduced add step (all matrix multiplications get accumulated
- along the first dimension).
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
- same number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- .. math::
- out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
- must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.addbmm(M, batch1, batch2)
- tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
- [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
- [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
- """
- ...
- @overload
- def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices stored
- in :attr:`batch1` and :attr:`batch2`,
- with a reduced add step (all matrix multiplications get accumulated
- along the first dimension).
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
- same number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- .. math::
- out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
- must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.addbmm(M, batch1, batch2)
- tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
- [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
- [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
- """
- ...
- @overload
- def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor:
- r"""
- addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices stored
- in :attr:`batch1` and :attr:`batch2`,
- with a reduced add step (all matrix multiplications get accumulated
- along the first dimension).
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
- same number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- .. math::
- out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
- must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.addbmm(M, batch1, batch2)
- tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
- [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
- [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
- """
- ...
- @overload
- def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices stored
- in :attr:`batch1` and :attr:`batch2`,
- with a reduced add step (all matrix multiplications get accumulated
- along the first dimension).
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
- same number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- .. math::
- out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
- must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.addbmm(M, batch1, batch2)
- tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
- [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
- [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
- """
- ...
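- # Equivalence sketch (illustrative, not generated): addbmm fuses a batched
- # matmul, a reduction over the batch dimension, and the scaled add, so up to
- # floating-point accumulation order:
- #   >>> M = torch.randn(3, 5)
- #   >>> b1, b2 = torch.randn(10, 3, 4), torch.randn(10, 4, 5)
- #   >>> expected = M + torch.bmm(b1, b2).sum(dim=0)
- #   >>> torch.allclose(torch.addbmm(M, b1, b2), expected, atol=1e-5)
- #   True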
- @overload
- def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor:
- r"""
- addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
-
- Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
- multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
-
- .. warning::
- Integer division with addcdiv is no longer supported, and in a future
- release addcdiv will perform a true division of tensor1 and tensor2.
- The historic addcdiv behavior can be implemented as
- (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
- for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
- The future addcdiv behavior is just the latter implementation:
- (input + value * tensor1 / tensor2), for all dtypes.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
-
-
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
-
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the numerator tensor
- tensor2 (Tensor): the denominator tensor
-
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcdiv(t, t1, t2, value=0.1)
- tensor([[-0.2312, -3.6496, 0.1312],
- [-1.0428, 3.4292, -0.1030],
- [-0.5369, -0.9829, 0.0430]])
- """
- ...
- @overload
- def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
-
- Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
- multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
-
- .. warning::
- Integer division with addcdiv is no longer supported, and in a future
- release addcdiv will perform a true division of tensor1 and tensor2.
- The historic addcdiv behavior can be implemented as
- (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
- for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
- The future addcdiv behavior is just the latter implementation:
- (input + value * tensor1 / tensor2), for all dtypes.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
-
-
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
-
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the numerator tensor
- tensor2 (Tensor): the denominator tensor
-
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcdiv(t, t1, t2, value=0.1)
- tensor([[-0.2312, -3.6496, 0.1312],
- [-1.0428, 3.4292, -0.1030],
- [-0.5369, -0.9829, 0.0430]])
- """
- ...
- @overload
- def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
-
- Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
- multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
-
- .. warning::
- Integer division with addcdiv is no longer supported, and in a future
- release addcdiv will perform a true division of tensor1 and tensor2.
- The historic addcdiv behavior can be implemented as
- (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
- for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
- The future addcdiv behavior is just the latter implementation:
- (input + value * tensor1 / tensor2), for all dtypes.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
-
-
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
-
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the numerator tensor
- tensor2 (Tensor): the denominator tensor
-
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}`
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcdiv(t, t1, t2, value=0.1)
- tensor([[-0.2312, -3.6496, 0.1312],
- [-1.0428, 3.4292, -0.1030],
- [-0.5369, -0.9829, 0.0430]])
- """
- ...
- @overload
- def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor:
- r"""
- addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
-
- Performs the element-wise multiplication of :attr:`tensor1`
- by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
- and adds it to :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
-
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
-
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the tensor to be multiplied
- tensor2 (Tensor): the tensor to be multiplied
-
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{tensor1} \times \text{tensor2}`
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcmul(t, t1, t2, value=0.1)
- tensor([[-0.8635, -0.6391, 1.6174],
- [-0.7617, -0.5879, 1.7388],
- [-0.8353, -0.6249, 1.6511]])
- """
- ...
- @overload
- def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
-
- Performs the element-wise multiplication of :attr:`tensor1`
- by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
- and adds it to :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
-
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
-
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the tensor to be multiplied
- tensor2 (Tensor): the tensor to be multiplied
-
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{tensor1} \times \text{tensor2}`
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcmul(t, t1, t2, value=0.1)
- tensor([[-0.8635, -0.6391, 1.6174],
- [-0.7617, -0.5879, 1.7388],
- [-0.8353, -0.6249, 1.6511]])
- """
- ...
- @overload
- def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
-
- Performs the element-wise multiplication of :attr:`tensor1`
- by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
- and adds it to :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
-
- The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
- a real number, otherwise an integer.
-
- Args:
- input (Tensor): the tensor to be added
- tensor1 (Tensor): the tensor to be multiplied
- tensor2 (Tensor): the tensor to be multiplied
-
- Keyword args:
- value (Number, optional): multiplier for :math:`\text{tensor1} \times \text{tensor2}`
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(1, 3)
- >>> t1 = torch.randn(3, 1)
- >>> t2 = torch.randn(1, 3)
- >>> torch.addcmul(t, t1, t2, value=0.1)
- tensor([[-0.8635, -0.6391, 1.6174],
- [-0.7617, -0.5879, 1.7388],
- [-0.8353, -0.6249, 1.6511]])
- """
- ...
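- # Equivalence sketch (illustrative, not generated): addcmul is the fused form
- # of `input + value * tensor1 * tensor2`, with broadcasting:
- #   >>> t, t1, t2 = torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3)
- #   >>> torch.allclose(torch.addcmul(t, t1, t2, value=0.1), t + 0.1 * t1 * t2)
- #   True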
- @overload
- def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor:
- r"""
- addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
- The matrix :attr:`input` is added to the final result.
-
- If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
- :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
- :attr:`input` is sparse the result will have the same layout and if :attr:`out`
- is provided it must have the same layout as :attr:`input`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- mat1 (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2, 3)
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.addmm(M, mat1, mat2)
- tensor([[-4.8716, 1.4671, -1.3746],
- [ 0.7573, -3.9555, -2.8681]])
- """
- ...
- @overload
- def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
- The matrix :attr:`input` is added to the final result.
-
- If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
- :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
- :attr:`input` is sparse the result will have the same layout and if :attr:`out`
- is provided it must have the same layout as :attr:`input`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- mat1 (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2, 3)
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.addmm(M, mat1, mat2)
- tensor([[-4.8716, 1.4671, -1.3746],
- [ 0.7573, -3.9555, -2.8681]])
- """
- ...
- @overload
- def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
- The matrix :attr:`input` is added to the final result.
-
- If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
- :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
- :attr:`input` is sparse the result will have the same layout and if :attr:`out`
- is provided it must have the same layout as :attr:`input`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- mat1 (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2, 3)
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.addmm(M, mat1, mat2)
- tensor([[-4.8716, 1.4671, -1.3746],
- [ 0.7573, -3.9555, -2.8681]])
- """
- ...
- @overload
- def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor:
- r"""
- addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
- The matrix :attr:`input` is added to the final result.
-
- If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
- :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
- :attr:`input` is sparse the result will have the same layout and if :attr:`out`
- is provided it must have the same layout as :attr:`input`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- mat1 (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2, 3)
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.addmm(M, mat1, mat2)
- tensor([[-4.8716, 1.4671, -1.3746],
- [ 0.7573, -3.9555, -2.8681]])
- """
- ...
- @overload
- def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
- The matrix :attr:`input` is added to the final result.
-
- If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
- and :attr:`out` will be a :math:`(n \times p)` tensor.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
- :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
- :attr:`input` is sparse the result will have the same layout and if :attr:`out`
- is provided it must have the same layout as :attr:`input`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): matrix to be added
- mat1 (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2, 3)
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.addmm(M, mat1, mat2)
- tensor([[-4.8716, 1.4671, -1.3746],
- [ 0.7573, -3.9555, -2.8681]])
- """
- ...
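- # Equivalence sketch (illustrative, not generated): for dense inputs addmm
- # computes `beta * input + alpha * (mat1 @ mat2)` in one fused op:
- #   >>> M, m1, m2 = torch.randn(2, 3), torch.randn(2, 4), torch.randn(4, 3)
- #   >>> torch.allclose(torch.addmm(M, m1, m2, beta=2.0, alpha=0.5),
- #   ...                2.0 * M + 0.5 * (m1 @ m2), atol=1e-6)
- #   True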
- @overload
- def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor:
- r"""
- addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix-vector product of the matrix :attr:`mat` and
- the vector :attr:`vec`.
- The vector :attr:`input` is added to the final result.
-
- If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
- :attr:`out` will be a 1-D tensor of size `n`.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
- :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- Args:
- input (Tensor): vector to be added
- mat (Tensor): matrix to be matrix multiplied
- vec (Tensor): vector to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2)
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.addmv(M, mat, vec)
- tensor([-0.3768, -5.5565])
- """
- ...
- @overload
- def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix-vector product of the matrix :attr:`mat` and
- the vector :attr:`vec`.
- The vector :attr:`input` is added to the final result.
-
- If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
- :attr:`out` will be a 1-D tensor of size `n`.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
- :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- Args:
- input (Tensor): vector to be added
- mat (Tensor): matrix to be matrix multiplied
- vec (Tensor): vector to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2)
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.addmv(M, mat, vec)
- tensor([-0.3768, -5.5565])
- """
- ...
- @overload
- def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix-vector product of the matrix :attr:`mat` and
- the vector :attr:`vec`.
- The vector :attr:`input` is added to the final result.
-
- If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
- :attr:`out` will be a 1-D tensor of size `n`.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
- :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- Args:
- input (Tensor): vector to be added
- mat (Tensor): matrix to be matrix multiplied
- vec (Tensor): vector to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2)
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.addmv(M, mat, vec)
- tensor([-0.3768, -5.5565])
- """
- ...
- @overload
- def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor:
- r"""
- addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix-vector product of the matrix :attr:`mat` and
- the vector :attr:`vec`.
- The vector :attr:`input` is added to the final result.
-
- If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
- :attr:`out` will be a 1-D tensor of size `n`.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
- :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- Args:
- input (Tensor): vector to be added
- mat (Tensor): matrix to be matrix multiplied
- vec (Tensor): vector to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2)
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.addmv(M, mat, vec)
- tensor([-0.3768, -5.5565])
- """
- ...
- @overload
- def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a matrix-vector product of the matrix :attr:`mat` and
- the vector :attr:`vec`.
- The vector :attr:`input` is added to the final result.
-
- If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
- size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
- :attr:`out` will be a 1-D tensor of size `n`.
-
- :attr:`alpha` and :attr:`beta` are scaling factors on the matrix-vector product between
- :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers; otherwise, they should be integers.
-
- Args:
- input (Tensor): vector to be added
- mat (Tensor): matrix to be matrix multiplied
- vec (Tensor): vector to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(2)
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.addmv(M, mat, vec)
- tensor([-0.3768, -5.5565])
- """
- ...
- @overload
- def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ...
- @overload
- def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ...
- @overload
- def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ...
- @overload
- def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor:
- r"""
- addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
- and adds it to the matrix :attr:`input`.
-
- Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
- outer product of :attr:`vec1` and :attr:`vec2` and on the added matrix
- :attr:`input`, respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
- of size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
- :math:`(n \times m)` and :attr:`out` will be a matrix of size
- :math:`(n \times m)`.
-
- Args:
- input (Tensor): matrix to be added
- vec1 (Tensor): the first vector of the outer product
- vec2 (Tensor): the second vector of the outer product
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> vec1 = torch.arange(1., 4.)
- >>> vec2 = torch.arange(1., 3.)
- >>> M = torch.zeros(3, 2)
- >>> torch.addr(M, vec1, vec2)
- tensor([[ 1., 2.],
- [ 2., 4.],
- [ 3., 6.]])
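-
- >>> # Illustrative sketch (same M, vec1, vec2 as above): beta=0 drops M
- >>> # and alpha=2 doubles the outer product.
- >>> torch.addr(M, vec1, vec2, beta=0, alpha=2)
- tensor([[ 2., 4.],
- [ 4., 8.],
- [ 6., 12.]])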
- """
- ...
- @overload
- def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
- and adds it to the matrix :attr:`input`.
-
- Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
- outer product of :attr:`vec1` and :attr:`vec2` and on the added matrix
- :attr:`input`, respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
- of size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
- :math:`(n \times m)` and :attr:`out` will be a matrix of size
- :math:`(n \times m)`.
-
- Args:
- input (Tensor): matrix to be added
- vec1 (Tensor): the first vector of the outer product
- vec2 (Tensor): the second vector of the outer product
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> vec1 = torch.arange(1., 4.)
- >>> vec2 = torch.arange(1., 3.)
- >>> M = torch.zeros(3, 2)
- >>> torch.addr(M, vec1, vec2)
- tensor([[ 1., 2.],
- [ 2., 4.],
- [ 3., 6.]])
- """
- ...
- @overload
- def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
- and adds it to the matrix :attr:`input`.
-
- Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
- outer product of :attr:`vec1` and :attr:`vec2` and on the added matrix
- :attr:`input`, respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
- of size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
- :math:`(n \times m)` and :attr:`out` will be a matrix of size
- :math:`(n \times m)`.
-
- Args:
- input (Tensor): matrix to be added
- vec1 (Tensor): the first vector of the outer product
- vec2 (Tensor): the second vector of the outer product
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> vec1 = torch.arange(1., 4.)
- >>> vec2 = torch.arange(1., 3.)
- >>> M = torch.zeros(3, 2)
- >>> torch.addr(M, vec1, vec2)
- tensor([[ 1., 2.],
- [ 2., 4.],
- [ 3., 6.]])
- """
- ...
- @overload
- def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor:
- r"""
- addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
- and adds it to the matrix :attr:`input`.
-
- Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
- outer product of :attr:`vec1` and :attr:`vec2` and on the added matrix
- :attr:`input`, respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
- of size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
- :math:`(n \times m)` and :attr:`out` will be a matrix of size
- :math:`(n \times m)`.
-
- Args:
- input (Tensor): matrix to be added
- vec1 (Tensor): the first vector of the outer product
- vec2 (Tensor): the second vector of the outer product
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> vec1 = torch.arange(1., 4.)
- >>> vec2 = torch.arange(1., 3.)
- >>> M = torch.zeros(3, 2)
- >>> torch.addr(M, vec1, vec2)
- tensor([[ 1., 2.],
- [ 2., 4.],
- [ 3., 6.]])
- """
- ...
- @overload
- def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs the outer product of vectors :attr:`vec1` and :attr:`vec2`
- and adds it to the matrix :attr:`input`.
-
- Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
- outer product of :attr:`vec1` and :attr:`vec2` and on the added matrix
- :attr:`input`, respectively.
-
- .. math::
- \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
- of size `m`, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a matrix of size
- :math:`(n \times m)` and :attr:`out` will be a matrix of size
- :math:`(n \times m)`.
-
- Args:
- input (Tensor): matrix to be added
- vec1 (Tensor): the first vector of the outer product
- vec2 (Tensor): the second vector of the outer product
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> vec1 = torch.arange(1., 4.)
- >>> vec2 = torch.arange(1., 3.)
- >>> M = torch.zeros(3, 2)
- >>> torch.addr(M, vec1, vec2)
- tensor([[ 1., 2.],
- [ 2., 4.],
- [ 3., 6.]])
- """
- ...
- def adjoint(input: Tensor) -> Tensor:
- r"""
- adjoint(Tensor) -> Tensor
-
- Returns a view of the tensor conjugated and with the last two dimensions transposed.
-
- ``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
- to ``x.transpose(-2, -1)`` for real tensors.
-
- Example::
-
- >>> x = torch.arange(4, dtype=torch.float)
- >>> A = torch.complex(x, x).reshape(2, 2)
- >>> A
- tensor([[0.+0.j, 1.+1.j],
- [2.+2.j, 3.+3.j]])
- >>> A.adjoint()
- tensor([[0.-0.j, 2.-2.j],
- [1.-1.j, 3.-3.j]])
- >>> (A.adjoint() == A.mH).all()
- tensor(True)
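-
- >>> # For a real tensor, adjoint() is just a transpose of the last two dims
- >>> # (illustrative values chosen here):
- >>> torch.tensor([[1., 2.], [3., 4.]]).adjoint()
- tensor([[1., 3.],
- [2., 4.]])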
- """
- ...
- def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ...
- def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.alias`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- all(input) -> Tensor
-
- Tests if all elements in :attr:`input` evaluate to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.all(a)
- tensor(False, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.all(a)
- tensor(False)
-
- .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(4, 2).bool()
- >>> a
- tensor([[True, True],
- [True, False],
- [True, True],
- [True, True]], dtype=torch.bool)
- >>> torch.all(a, dim=1)
- tensor([ True, False, True, True], dtype=torch.bool)
- >>> torch.all(a, dim=0)
- tensor([ True, False], dtype=torch.bool)
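-
- >>> # A deterministic sketch (values chosen here, not from the original docs):
- >>> torch.all(torch.tensor([[True, True], [True, False]]), dim=1)
- tensor([ True, False])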
- """
- ...
- @overload
- def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- all(input) -> Tensor
-
- Tests if all elements in :attr:`input` evaluate to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.all(a)
- tensor(False, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.all(a)
- tensor(False)
-
- .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(4, 2).bool()
- >>> a
- tensor([[True, True],
- [True, False],
- [True, True],
- [True, True]], dtype=torch.bool)
- >>> torch.all(a, dim=1)
- tensor([ True, False, True, True], dtype=torch.bool)
- >>> torch.all(a, dim=0)
- tensor([ True, False], dtype=torch.bool)
- """
- ...
- @overload
- def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- all(input) -> Tensor
-
- Tests if all elements in :attr:`input` evaluate to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.all(a)
- tensor(False, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.all(a)
- tensor(False)
-
- .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(4, 2).bool()
- >>> a
- tensor([[True, True],
- [True, False],
- [True, True],
- [True, True]], dtype=torch.bool)
- >>> torch.all(a, dim=1)
- tensor([ True, False, True, True], dtype=torch.bool)
- >>> torch.all(a, dim=0)
- tensor([ True, False], dtype=torch.bool)
- """
- ...
- @overload
- def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- all(input) -> Tensor
-
- Tests if all elements in :attr:`input` evaluate to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.all(a)
- tensor(False, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.all(a)
- tensor(False)
-
- .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(4, 2).bool()
- >>> a
- tensor([[True, True],
- [True, False],
- [True, True],
- [True, True]], dtype=torch.bool)
- >>> torch.all(a, dim=1)
- tensor([ True, False, True, True], dtype=torch.bool)
- >>> torch.all(a, dim=0)
- tensor([ True, False], dtype=torch.bool)
- """
- ...
- def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool:
- r"""
- allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
-
- This function checks if :attr:`input` and :attr:`other` satisfy the condition:
-
- .. math::
- \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
-
- elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
- `numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_.
-
- Args:
- input (Tensor): first tensor to compare
- other (Tensor): second tensor to compare
- atol (float, optional): absolute tolerance. Default: 1e-08
- rtol (float, optional): relative tolerance. Default: 1e-05
- equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
-
- Example::
-
- >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
- False
- >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
- True
- >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
- False
- >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
- True
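-
- >>> # Worked instance of the condition above (values chosen here):
- >>> # |1.0 - 1.000001| = 1e-06 <= 1e-08 + 1e-05 * 1.000001, so this is True.
- >>> torch.allclose(torch.tensor([1.0]), torch.tensor([1.000001]))
- True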
- """
- ...
- def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- amax(input, dim, keepdim=False, *, out=None) -> Tensor
-
- Returns the maximum value of each slice of the :attr:`input` tensor in the given
- dimension(s) :attr:`dim`.
-
- .. note::
- The difference between ``max``/``min`` and ``amax``/``amin`` is:
- - ``amax``/``amin`` supports reducing on multiple dimensions,
- - ``amax``/``amin`` does not return indices,
- - ``amax``/``amin`` evenly distributes gradient between equal values,
- while ``max(dim)``/``min(dim)`` propagates gradient only to a single
- index in the source tensor.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
- [-0.7158, 1.1775, 2.0992, 0.4817],
- [-0.0053, 0.0164, -1.3738, -0.0507],
- [ 1.9700, 1.1106, -1.0318, -1.0816]])
- >>> torch.amax(a, 1)
- tensor([1.4878, 2.0992, 0.0164, 1.9700])
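-
- >>> # Reducing over several dimensions at once, which max() cannot do
- >>> # (illustrative values chosen here):
- >>> torch.amax(torch.tensor([[1., 2.], [3., 4.]]), dim=(0, 1))
- tensor(4.)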
- """
- ...
- def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- amin(input, dim, keepdim=False, *, out=None) -> Tensor
-
- Returns the minimum value of each slice of the :attr:`input` tensor in the given
- dimension(s) :attr:`dim`.
-
- .. note::
- The difference between ``max``/``min`` and ``amax``/``amin`` is:
- - ``amax``/``amin`` supports reducing on multiple dimensions,
- - ``amax``/``amin`` does not return indices,
- - ``amax``/``amin`` evenly distributes gradient between equal values,
- while ``max(dim)``/``min(dim)`` propagates gradient only to a single
- index in the source tensor.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
- [-0.5744, 1.2980, 1.8397, -0.2713],
- [ 0.9128, 0.9214, -1.7268, -0.2995],
- [ 0.9023, 0.4853, 0.9075, -1.6165]])
- >>> torch.amin(a, 1)
- tensor([-1.3312, -0.5744, -1.7268, -1.6165])
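-
- >>> # Reducing over several dimensions at once (illustrative values chosen here):
- >>> torch.amin(torch.tensor([[1., 2.], [3., 4.]]), dim=(0, 1))
- tensor(1.)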
- """
- ...
- def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax:
- r"""
- aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
-
- Computes the minimum and maximum values of the :attr:`input` tensor.
-
- Args:
- input (Tensor):
- The input tensor
-
- Keyword Args:
- dim (Optional[int]):
- The dimension along which to compute the values. If `None`,
- computes the values over the entire :attr:`input` tensor.
- Default is `None`.
- keepdim (bool):
- If `True`, the reduced dimensions will be kept in the output
- tensor as dimensions with size 1 for broadcasting; otherwise
- they will be removed, as if by calling :func:`torch.squeeze`.
- Default is `False`.
- out (Optional[Tuple[Tensor, Tensor]]):
- Optional tensors on which to write the result. Must have the same
- shape and dtype as the expected output.
- Default is `None`.
-
- Returns:
- A named tuple `(min, max)` containing the minimum and maximum values.
-
- Raises:
- RuntimeError
- If any of the dimensions to compute the values over has size 0.
-
- .. note::
- NaN values are propagated to the output if at least one value is NaN.
-
- .. seealso::
- :func:`torch.amin` computes just the minimum value
- :func:`torch.amax` computes just the maximum value
-
- Example::
-
- >>> torch.aminmax(torch.tensor([1, -3, 5]))
- torch.return_types.aminmax(
- min=tensor(-3),
- max=tensor(5))
-
- >>> # aminmax propagates NaNs
- >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
- torch.return_types.aminmax(
- min=tensor(nan),
- max=tensor(nan))
-
- >>> t = torch.arange(10).view(2, 5)
- >>> t
- tensor([[0, 1, 2, 3, 4],
- [5, 6, 7, 8, 9]])
- >>> t.aminmax(dim=0, keepdim=True)
- torch.return_types.aminmax(
- min=tensor([[0, 1, 2, 3, 4]]),
- max=tensor([[5, 6, 7, 8, 9]]))
- """
- ...
- def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- angle(input, *, out=None) -> Tensor
-
- Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
-
- .. math::
- \text{out}_{i} = angle(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- .. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
- zero for non-negative real numbers, and propagates NaNs. Previously
- the function would return zero for all real numbers and not propagate
- floating-point NaNs.
-
- Example::
-
- >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
- tensor([ 135.0001,  135.0001,  -45.0000])
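-
- >>> # Per the note above: pi for negative reals, zero for non-negative reals
- >>> # (illustrative values chosen here):
- >>> torch.angle(torch.tensor([-1.0, 1.0]))
- tensor([3.1416, 0.0000])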
- """
- ...
- @overload
- def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- any(input) -> Tensor
-
- Tests if any element in :attr:`input` evaluates to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.any(a)
- tensor(True, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.any(a)
- tensor(True)
-
- .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if any element in the row evaluates to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 2) < 0
- >>> a
- tensor([[ True, True],
- [False, True],
- [ True, True],
- [False, False]])
- >>> torch.any(a, 1)
- tensor([ True, True, True, False])
- >>> torch.any(a, 0)
- tensor([True, True])
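-
- >>> # A deterministic sketch (values chosen here, not from the original docs):
- >>> torch.any(torch.tensor([[False, False], [False, True]]), dim=1)
- tensor([False,  True])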
- """
- ...
- @overload
- def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- any(input) -> Tensor
-
- Tests if any element in :attr:`input` evaluates to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.any(a)
- tensor(True, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.any(a)
- tensor(True)
-
- .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if any element in the row evaluates to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 2) < 0
- >>> a
- tensor([[ True, True],
- [False, True],
- [ True, True],
- [False, False]])
- >>> torch.any(a, 1)
- tensor([ True, True, True, False])
- >>> torch.any(a, 0)
- tensor([True, True])
- """
- ...
- @overload
- def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- any(input) -> Tensor
-
- Tests if any element in :attr:`input` evaluates to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.any(a)
- tensor(True, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.any(a)
- tensor(True)
-
- .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if any element in the row evaluates to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 2) < 0
- >>> a
- tensor([[ True, True],
- [False, True],
- [ True, True],
- [False, False]])
- >>> torch.any(a, 1)
- tensor([ True, True, True, False])
- >>> torch.any(a, 0)
- tensor([True, True])
- """
- ...
- @overload
- def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- any(input) -> Tensor
-
- Tests if any element in :attr:`input` evaluates to `True`.
-
- .. note:: This function matches the behaviour of NumPy in returning
- output of dtype `bool` for all supported dtypes except `uint8`.
- For `uint8` the dtype of output is `uint8` itself.
-
- Example::
-
- >>> a = torch.rand(1, 2).bool()
- >>> a
- tensor([[False, True]], dtype=torch.bool)
- >>> torch.any(a)
- tensor(True, dtype=torch.bool)
- >>> a = torch.arange(0, 3)
- >>> a
- tensor([0, 1, 2])
- >>> torch.any(a)
- tensor(True)
-
- .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
- :noindex:
-
- For each row of :attr:`input` in the given dimension :attr:`dim`,
- returns `True` if any element in the row evaluates to `True` and `False` otherwise.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 2) < 0
- >>> a
- tensor([[ True, True],
- [False, True],
- [ True, True],
- [False, False]])
- >>> torch.any(a, 1)
- tensor([ True, True, True, False])
- >>> torch.any(a, 0)
- tensor([True, True])
- """
- ...
- @overload
- def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
-
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
- in such cases.
-
- .. math::
- \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments: if any of `start`, `end`, or `step` are floating-point, the
- `dtype` is the global default dtype (see :func:`torch.get_default_dtype`);
- otherwise, the `dtype` is `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
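-
- >>> # All-integer arguments infer dtype torch.int64, per the dtype note above
- >>> # (illustrative values chosen here):
- >>> torch.arange(0, 10, 2)
- tensor([0, 2, 4, 6, 8])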
- """
- ...
- @overload
- def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
-
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
- in such cases.
-
- .. math::
- \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments: if any of `start`, `end`, or `step` are floating-point, the
- `dtype` is the global default dtype (see :func:`torch.get_default_dtype`);
- otherwise, the `dtype` is `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
- """
- ...
- @overload
- def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
-
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
- in such cases.
-
- .. math::
- \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments: if any of `start`, `end`, or `step` are floating-point, the
- `dtype` is the global default dtype (see :func:`torch.get_default_dtype`);
- otherwise, the `dtype` is `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
- """
- ...
- @overload
- def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
-
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
- in such cases.
-
- .. math::
- \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments: if any of `start`, `end`, or `step` are floating-point, the
- `dtype` is the global default dtype (see :func:`torch.get_default_dtype`);
- otherwise, the `dtype` is `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
- """
- ...
- @overload
- def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
-
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
- in such cases.
-
- .. math::
- \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments: if any of `start`, `end`, or `step` are floating-point, the
- `dtype` is the global default dtype (see :func:`torch.get_default_dtype`);
- otherwise, the `dtype` is `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
- """
- ...
- @overload
- def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
- with values from the interval ``[start, end)`` taken with common difference
- :attr:`step` beginning from `start`.
-
- Note that non-integer :attr:`step` is subject to floating point rounding errors when
- comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
- in such cases.
-
- .. math::
- \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-
- Args:
- start (Number): the starting value for the set of points. Default: ``0``.
- end (Number): the ending value for the set of points
- step (Number): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments: if any of `start`, `end`, or `step` are floating-point, the
- `dtype` is the global default dtype (see :func:`torch.get_default_dtype`);
- otherwise, the `dtype` is `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.arange(5)
- tensor([ 0, 1, 2, 3, 4])
- >>> torch.arange(1, 4)
- tensor([ 1, 2, 3])
- >>> torch.arange(1, 2.5, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000])
- """
- ...
- def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arccos(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.acos`.
- """
- ...
- def arccos_(input: Tensor) -> Tensor: ...
- def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arccosh(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.acosh`.
- """
- ...
- def arccosh_(input: Tensor) -> Tensor: ...
- def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arcsin(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.asin`.
- """
- ...
- def arcsin_(input: Tensor) -> Tensor: ...
- def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arcsinh(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.asinh`.
- """
- ...
- def arcsinh_(input: Tensor) -> Tensor: ...
- def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arctan(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.atan`.
- """
- ...
- def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arctan2(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.atan2`.
- """
- ...
- def arctan_(input: Tensor) -> Tensor: ...
- def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- arctanh(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.atanh`.
- """
- ...
- def arctanh_(input: Tensor) -> Tensor: ...
- def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- argmax(input) -> LongTensor
-
- Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
-
- This is the second value returned by :meth:`torch.max`. See its
- documentation for the exact semantics of this method.
-
- .. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
- [-0.7401, -0.8805, -0.3402, -1.1936],
- [ 0.4907, -1.3948, -1.0691, -0.3132],
- [-1.6092, 0.5419, -0.2993, 0.3195]])
- >>> torch.argmax(a)
- tensor(0)
-
- .. function:: argmax(input, dim, keepdim=False) -> LongTensor
- :noindex:
-
- Returns the indices of the maximum values of a tensor across a dimension.
-
- This is the second value returned by :meth:`torch.max`. See its
- documentation for the exact semantics of this method.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce. If ``None``, the argmax of the flattened input is returned.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
- [-0.7401, -0.8805, -0.3402, -1.1936],
- [ 0.4907, -1.3948, -1.0691, -0.3132],
- [-1.6092, 0.5419, -0.2993, 0.3195]])
- >>> torch.argmax(a, dim=1)
- tensor([ 0, 2, 0, 1])
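-
- >>> # With ties, the index of the first maximal value is returned, per the
- >>> # note above (illustrative values chosen here):
- >>> torch.argmax(torch.tensor([1, 3, 3]))
- tensor(1)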
- """
- ...
- def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- argmin(input, dim=None, keepdim=False) -> LongTensor
-
- Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.
-
- This is the second value returned by :meth:`torch.min`. See its
- documentation for the exact semantics of this method.
-
- .. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce. If ``None``, the argmin of the flattened input is returned.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
- [ 1.0100, -1.1975, -0.0102, -0.4732],
- [-0.9240, 0.1207, -0.7506, -1.0213],
- [ 1.7809, -1.2960, 0.9384, 0.1438]])
- >>> torch.argmin(a)
- tensor(13)
- >>> torch.argmin(a, dim=1)
- tensor([ 2, 1, 3, 1])
- >>> torch.argmin(a, dim=1, keepdim=True)
- tensor([[2],
- [1],
- [3],
- [1]])
- """
- ...
- @overload
- def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor:
- r"""
- argsort(input, dim=-1, descending=False, stable=False) -> Tensor
-
- Returns the indices that sort a tensor along a given dimension in ascending
- order by value.
-
- This is the second value returned by :meth:`torch.sort`. See its documentation
- for the exact semantics of this method.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements. If ``False``, the relative order of values
- which compare equal is not guaranteed. ``True`` is slower.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): controls the relative order of equivalent elements
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
- [ 0.1598, 0.0788, -0.0745, -1.2700],
- [ 1.2208, 1.0722, -0.7064, 1.2564],
- [ 0.0669, -0.2318, -0.8229, -0.9280]])
-
-
- >>> torch.argsort(a, dim=1)
- tensor([[2, 0, 3, 1],
- [3, 2, 1, 0],
- [2, 1, 0, 3],
- [3, 2, 1, 0]])
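-
- >>> # stable=True keeps equal values in their original order
- >>> # (illustrative input chosen here):
- >>> torch.argsort(torch.tensor([1, 0, 1, 0]), stable=True)
- tensor([1, 3, 0, 2])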
- """
- ...
- @overload
- def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor:
- r"""
- argsort(input, dim=-1, descending=False, stable=False) -> Tensor
-
- Returns the indices that sort a tensor along a given dimension in ascending
- order by value.
-
- This is the second value returned by :meth:`torch.sort`. See its documentation
- for the exact semantics of this method.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements. If ``False``, the relative order of values
- which compare equal is not guaranteed. ``True`` is slower.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): controls the relative order of equivalent elements
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
- [ 0.1598, 0.0788, -0.0745, -1.2700],
- [ 1.2208, 1.0722, -0.7064, 1.2564],
- [ 0.0669, -0.2318, -0.8229, -0.9280]])
-
-
- >>> torch.argsort(a, dim=1)
- tensor([[2, 0, 3, 1],
- [3, 2, 1, 0],
- [2, 1, 0, 3],
- [3, 2, 1, 0]])
- """
- ...
- @overload
- def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor:
- r"""
- argsort(input, dim=-1, descending=False, stable=False) -> Tensor
-
- Returns the indices that sort a tensor along a given dimension in ascending
- order by value.
-
- This is the second value returned by :meth:`torch.sort`. See its documentation
- for the exact semantics of this method.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements. If ``False``, the relative order of values
- which compare equal is not guaranteed. ``True`` is slower.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): controls the relative order of equivalent elements
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
- [ 0.1598, 0.0788, -0.0745, -1.2700],
- [ 1.2208, 1.0722, -0.7064, 1.2564],
- [ 0.0669, -0.2318, -0.8229, -0.9280]])
-
-
- >>> torch.argsort(a, dim=1)
- tensor([[2, 0, 3, 1],
- [3, 2, 1, 0],
- [2, 1, 0, 3],
- [3, 2, 1, 0]])
- """
- ...
- def argwhere(input: Tensor) -> Tensor:
- r"""
- argwhere(input) -> Tensor
-
- Returns a tensor containing the indices of all non-zero elements of
- :attr:`input`. Each row in the result contains the indices of a non-zero
- element in :attr:`input`. The result is sorted lexicographically, with
- the last index changing the fastest (C-style).
-
- If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
- :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
-
- .. note::
- This function is similar to NumPy's `argwhere`.
-
- When :attr:`input` is on CUDA, this function causes host-device synchronization.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> t = torch.tensor([1, 0, 1])
- >>> torch.argwhere(t)
- tensor([[0],
- [2]])
- >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
- >>> torch.argwhere(t)
- tensor([[0, 0],
- [0, 2],
- [1, 1],
- [1, 2]])
- """
- ...
- def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- as_strided(input, size, stride, storage_offset=None) -> Tensor
-
- Create a view of an existing `torch.Tensor` :attr:`input` with specified
- :attr:`size`, :attr:`stride` and :attr:`storage_offset`.
-
- .. warning::
- Prefer using other view functions, like :meth:`torch.Tensor.expand`,
- to setting a view's strides manually with `as_strided`, as this
- function's behavior depends on the implementation of a tensor's storage.
- The constructed view of the storage must only refer to elements within
- the storage or a runtime error will be thrown, and if the view is
- "overlapped" (with multiple indices referring to the same element in
- memory) its behavior is undefined.
-
- Args:
- input (Tensor): the input tensor.
- size (tuple of ints): the shape of the output tensor
- stride (tuple of ints): the stride of the output tensor
- storage_offset (int, optional): the offset in the underlying storage of the output tensor.
- If ``None``, the storage_offset of the output tensor will match the input tensor.
-
- Example::
-
- >>> x = torch.randn(3, 3)
- >>> x
- tensor([[ 0.9039, 0.6291, 1.0795],
- [ 0.1586, 2.1939, -0.4900],
- [-0.1909, -0.7503, 1.9355]])
- >>> t = torch.as_strided(x, (2, 2), (1, 2))
- >>> t
- tensor([[0.9039, 1.0795],
- [0.6291, 0.1586]])
- >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
- >>> t
- tensor([[0.6291, 0.1586],
- [1.0795, 2.1939]])
- """
- ...
- def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ...
- def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.as_strided`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor
-
- Embeds the values of the :attr:`src` tensor into :attr:`input` along
- the elements corresponding to the result of calling
- ``input.as_strided(size, stride, storage_offset)``.
-
- This function returns a tensor with fresh storage; it does not
- return a view.
-
- Args:
- input (Tensor): the input tensor.
- src (Tensor): the tensor to embed into :attr:`input`
- size (tuple of ints): the shape of the output tensor
- stride (tuple of ints): the stride of the output tensor
- storage_offset (int, optional): the offset in the underlying storage of the output tensor
-
- .. note::
-
- :attr:`src` must be of the proper size in order to be embedded
- into :attr:`input`. Specifically, it should have the same shape as
- `torch.as_strided(input, size, stride, storage_offset)`
-
- Example::
-
- >>> a = torch.arange(4).reshape(2, 2) + 1
- >>> a
- tensor([[1, 2],
- [3, 4]])
- >>> b = torch.zeros(3, 3)
- >>> b
- tensor([[0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]])
- >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
- tensor([[1., 3., 2.],
- [4., 0., 0.],
- [0., 0., 0.]])
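-
- As a hedged sanity check, reading the scattered result back through
- :func:`torch.as_strided` with the same parameters recovers :attr:`src`::
-
- >>> r = torch.as_strided_scatter(b, a, (2, 2), (1, 2))
- >>> torch.equal(torch.as_strided(r, (2, 2), (1, 2)), a.float())
- True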
- """
- ...
- def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor:
- r"""
- as_tensor(data, dtype=None, device=None) -> Tensor
-
- Converts :attr:`data` into a tensor, sharing data and preserving autograd
- history if possible.
-
- If :attr:`data` is already a tensor with the requested dtype and device
- then :attr:`data` itself is returned, but if :attr:`data` is a
- tensor with a different dtype or device then it's copied as if using
- `data.to(dtype=dtype, device=device)`.
-
- If :attr:`data` is a NumPy array (an ndarray) that already matches the requested
- dtype and device, then a tensor is constructed using :func:`torch.from_numpy`.
-
- .. seealso::
-
- :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).
-
-
- Args:
- data (array_like): Initial data for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, infers data type from :attr:`data`.
- device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
- then the device of data is used. If None and data is not a tensor then
- the result tensor is constructed on the current device.
-
-
- Example::
-
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.as_tensor(a)
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([-1, 2, 3])
-
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.as_tensor(a, device=torch.device('cuda'))
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([1, 2, 3])
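-
- A further hedged example: when a dtype conversion is required, the result is
- a copy and no longer shares memory with the source::
-
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.as_tensor(a, dtype=torch.float32)
- >>> t[0] = -1
- >>> a
- array([1, 2, 3])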
- """
- ...
- def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor:
- r"""
- asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor
-
- Converts :attr:`obj` to a tensor.
-
- :attr:`obj` can be one of:
-
- 1. a tensor
- 2. a NumPy array or a NumPy scalar
- 3. a DLPack capsule
- 4. an object that implements Python's buffer protocol
- 5. a scalar
- 6. a sequence of scalars
-
- When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
- by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
- same device, and share memory with it. These properties can be controlled with the
- :attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
- If the returned tensor is of a different datatype, on a different device, or a copy is
- requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
- is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
- also a tensor with an autograd history then the returned tensor will have the same history.
-
- When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
- buffer protocol then the buffer is interpreted as an array of bytes grouped according to
- the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
- passed then the default floating point datatype is used instead.) The returned tensor
- will have the specified datatype (or default floating point datatype if none is specified)
- and, by default, be on the CPU device and share memory with the buffer.
-
- When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
- the CPU that does not share its memory (i.e. ``copy=True``). By default, its datatype will
- be the PyTorch datatype corresponding to the NumPy scalar's datatype.
-
- When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
- returned tensor will, by default, infer its datatype from the scalar values, be on the
- current default device, and not share its memory.
-
- .. seealso::
-
- :func:`torch.tensor` creates a tensor that always copies the data from the input object.
- :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
- :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
- implement the buffer protocol.
- :func:`torch.from_dlpack` creates a tensor that always shares memory from
- DLPack capsules.
-
- Args:
- obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
- buffer protocol, scalar, or sequence of scalars.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
- Default: ``None``, which causes the datatype of the returned tensor to be
- inferred from :attr:`obj`.
- copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
- Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
- whenever possible. If ``True`` then the returned tensor does not share its memory.
- If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
- error is thrown if it cannot.
- device (:class:`torch.device`, optional): the device of the returned tensor.
- Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
- :attr:`obj` is a Python sequence, the current default device will be used.
- requires_grad (bool, optional): whether the returned tensor requires grad.
- Default: ``False``, which causes the returned tensor not to require a gradient.
- If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
- is also a tensor with an autograd history then the returned tensor will have
- the same history.
-
- Example::
-
- >>> a = torch.tensor([1, 2, 3])
- >>> # Shares memory with tensor 'a'
- >>> b = torch.asarray(a)
- >>> a.data_ptr() == b.data_ptr()
- True
- >>> # Forces memory copy
- >>> c = torch.asarray(a, copy=True)
- >>> a.data_ptr() == c.data_ptr()
- False
-
- >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
- >>> b = a + 2
- >>> b
- tensor([3., 4., 5.], grad_fn=<AddBackward0>)
- >>> # Shares memory with tensor 'b', with no grad
- >>> c = torch.asarray(b)
- >>> c
- tensor([3., 4., 5.])
- >>> # Shares memory with tensor 'b', retaining autograd history
- >>> d = torch.asarray(b, requires_grad=True)
- >>> d
- tensor([3., 4., 5.], grad_fn=<AddBackward0>)
-
- >>> array = numpy.array([1, 2, 3])
- >>> # Shares memory with array 'array'
- >>> t1 = torch.asarray(array)
- >>> array.__array_interface__['data'][0] == t1.data_ptr()
- True
- >>> # Copies memory due to dtype mismatch
- >>> t2 = torch.asarray(array, dtype=torch.float32)
- >>> array.__array_interface__['data'][0] == t2.data_ptr()
- False
-
- >>> scalar = numpy.float64(0.5)
- >>> torch.asarray(scalar)
- tensor(0.5000, dtype=torch.float64)
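-
- A hedged sketch of the buffer-protocol path (here a ``bytearray``; the bytes
- are reinterpreted according to the requested :attr:`dtype`)::
-
- >>> buf = bytearray([1, 2, 3])
- >>> torch.asarray(buf, dtype=torch.uint8)
- tensor([1, 2, 3], dtype=torch.uint8)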
- """
- ...
- def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- asin(input, *, out=None) -> Tensor
-
- Returns a new tensor with the arcsine of the elements of :attr:`input`.
- Elements outside the domain :math:`[-1, 1]` are mapped to ``nan``, as the
- example below shows.
-
- .. math::
- \text{out}_{i} = \sin^{-1}(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.5962, 1.4985, -0.4396, 1.4525])
- >>> torch.asin(a)
- tensor([-0.6387, nan, -0.4552, nan])
- """
- ...
- def asin_(input: Tensor) -> Tensor: ...
- def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- asinh(input, *, out=None) -> Tensor
-
- Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \sinh^{-1}(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
- >>> torch.asinh(a)
- tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
- """
- ...
- def asinh_(input: Tensor) -> Tensor: ...
- def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- atan(input, *, out=None) -> Tensor
-
- Returns a new tensor with the arctangent of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \tan^{-1}(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
- >>> torch.atan(a)
- tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
- """
- ...
- def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- atan2(input, other, *, out=None) -> Tensor
-
- Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}`
- with consideration of the quadrant. Returns a new tensor with the signed angles
- in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})`
- and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second
- parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first
- parameter, is the y-coordinate.)
-
- The shapes of ``input`` and ``other`` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the first input tensor
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
- >>> torch.atan2(a, torch.randn(4))
- tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
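-
- A worked check on the quadrant handling (a hedged sketch with exact inputs;
- :math:`\pi/4 \approx 0.7854`)::
-
- >>> y = torch.tensor([1., 1., -1., -1.])
- >>> x = torch.tensor([1., -1., 1., -1.])
- >>> torch.atan2(y, x)
- tensor([ 0.7854, 2.3562, -0.7854, -2.3562])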
- """
- ...
- def atan_(input: Tensor) -> Tensor: ...
- def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- atanh(input, *, out=None) -> Tensor
-
- Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
-
- Note:
- The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
- will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
- mapped to `+/-INF` respectively.
-
- .. math::
- \text{out}_{i} = \tanh^{-1}(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4).uniform_(-1, 1)
- >>> a
- tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
- >>> torch.atanh(a)
- tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
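-
- The boundary behavior described in the note above can be seen directly
- (a small sketch)::
-
- >>> torch.atanh(torch.tensor([-1., 0., 1., 2.]))
- tensor([-inf, 0., inf, nan])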
- """
- ...
- def atanh_(input: Tensor) -> Tensor: ...
- def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ...
- @overload
- def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor:
- r"""
- baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices in :attr:`batch1`
- and :attr:`batch2`.
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
- number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a
- :math:`(b \times n \times p)` tensor and :attr:`out` will be a
- :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
- same as the scaling factors used in :meth:`torch.addbmm`.
-
- .. math::
- \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): the tensor to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(10, 3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.baddbmm(M, batch1, batch2).size()
- torch.Size([10, 3, 5])
- """
- ...
- @overload
- def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices in :attr:`batch1`
- and :attr:`batch2`.
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
- number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a
- :math:`(b \times n \times p)` tensor and :attr:`out` will be a
- :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
- same as the scaling factors used in :meth:`torch.addbmm`.
-
- .. math::
- \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): the tensor to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(10, 3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.baddbmm(M, batch1, batch2).size()
- torch.Size([10, 3, 5])
- """
- ...
- @overload
- def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices in :attr:`batch1`
- and :attr:`batch2`.
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
- number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a
- :math:`(b \times n \times p)` tensor and :attr:`out` will be a
- :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
- same as the scaling factors used in :meth:`torch.addbmm`.
-
- .. math::
- \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): the tensor to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(10, 3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.baddbmm(M, batch1, batch2).size()
- torch.Size([10, 3, 5])
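-
- As a hedged numerical check of the formula above, the result matches the
- explicit combination of :func:`torch.bmm` with the two scaling factors::
-
- >>> out = torch.baddbmm(M, batch1, batch2, beta=0.5, alpha=2.0)
- >>> torch.allclose(out, 0.5 * M + 2.0 * torch.bmm(batch1, batch2))
- True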
- """
- ...
- @overload
- def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor:
- r"""
- baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices in :attr:`batch1`
- and :attr:`batch2`.
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
- number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a
- :math:`(b \times n \times p)` tensor and :attr:`out` will be a
- :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
- same as the scaling factors used in :meth:`torch.addbmm`.
-
- .. math::
- \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): the tensor to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(10, 3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.baddbmm(M, batch1, batch2).size()
- torch.Size([10, 3, 5])
- """
- ...
- @overload
- def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor:
- r"""
- baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices in :attr:`batch1`
- and :attr:`batch2`.
- :attr:`input` is added to the final result.
-
- :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
- number of matrices.
-
- If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
- :math:`(b \times m \times p)` tensor, then :attr:`input` must be
- :ref:`broadcastable <broadcasting-semantics>` with a
- :math:`(b \times n \times p)` tensor and :attr:`out` will be a
- :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
- same as the scaling factors used in :meth:`torch.addbmm`.
-
- .. math::
- \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
-
- If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
- it will not be propagated.
-
- For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
- :attr:`alpha` must be real numbers, otherwise they should be integers.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): the tensor to be added
- batch1 (Tensor): the first batch of matrices to be multiplied
- batch2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> M = torch.randn(10, 3, 5)
- >>> batch1 = torch.randn(10, 3, 4)
- >>> batch2 = torch.randn(10, 4, 5)
- >>> torch.baddbmm(M, batch1, batch2).size()
- torch.Size([10, 3, 5])
- """
- ...
- @overload
- def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Bartlett window function.
-
- .. math::
- w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
- \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
- 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
- \end{cases},
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
- above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.bartlett_window(L, periodic=True)`` equal to
- ``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
- """
- ...
- @overload
- def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Bartlett window function.
-
- .. math::
- w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
- \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
- 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
- \end{cases},
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
- above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.bartlett_window(L, periodic=True)`` equal to
- ``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
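-
- A hedged check of the periodic/symmetric identity stated above::
-
- >>> torch.equal(torch.bartlett_window(5, periodic=True),
- ... torch.bartlett_window(6, periodic=False)[:-1])
- True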
- """
- ...
- def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
- def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ...
- def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ...
- def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ...
- def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ...
- def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ...
- def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ...
- @overload
- def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bernoulli(input, *, generator=None, out=None) -> Tensor
-
- Draws binary random numbers (0 or 1) from a Bernoulli distribution.
-
- The :attr:`input` tensor should be a tensor containing probabilities
- to be used for drawing the binary random number.
- Hence, all values in :attr:`input` have to be in the range:
- :math:`0 \leq \text{input}_i \leq 1`.
-
- The :math:`\text{i}^{th}` element of the output tensor will draw a
- value :math:`1` according to the :math:`\text{i}^{th}` probability value given
- in :attr:`input`.
-
- .. math::
- \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
-
- The returned :attr:`out` tensor only has values 0 or 1 and is of the same
- shape as :attr:`input`.
-
- :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
- point ``dtype``.
-
- Args:
- input (Tensor): the input tensor of probability values for the Bernoulli distribution
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
- >>> a
- tensor([[ 0.1737, 0.0950, 0.3609],
- [ 0.7148, 0.0289, 0.2676],
- [ 0.9456, 0.8937, 0.7202]])
- >>> torch.bernoulli(a)
- tensor([[ 1., 0., 0.],
- [ 0., 0., 0.],
- [ 1., 1., 1.]])
-
- >>> a = torch.ones(3, 3) # probability of drawing "1" is 1
- >>> torch.bernoulli(a)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.],
- [ 1., 1., 1.]])
- >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
- >>> torch.bernoulli(a)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.],
- [ 0., 0., 0.]])
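-
- Sampling is reproducible when an explicit generator is supplied (a hedged
- sketch; the drawn values themselves depend on the seed)::
-
- >>> g1 = torch.Generator().manual_seed(42)
- >>> g2 = torch.Generator().manual_seed(42)
- >>> p = torch.rand(3, 3)
- >>> torch.equal(torch.bernoulli(p, generator=g1), torch.bernoulli(p, generator=g2))
- True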
- """
- ...
- @overload
- def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor:
- r"""
- bernoulli(input, *, generator=None, out=None) -> Tensor
-
- Draws binary random numbers (0 or 1) from a Bernoulli distribution.
-
- The :attr:`input` tensor should be a tensor containing probabilities
- to be used for drawing the binary random number.
- Hence, all values in :attr:`input` have to be in the range:
- :math:`0 \leq \text{input}_i \leq 1`.
-
- The :math:`\text{i}^{th}` element of the output tensor will draw a
- value :math:`1` according to the :math:`\text{i}^{th}` probability value given
- in :attr:`input`.
-
- .. math::
- \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
-
- The returned :attr:`out` tensor only has values 0 or 1 and is of the same
- shape as :attr:`input`.
-
- :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
- point ``dtype``.
-
- Args:
- input (Tensor): the input tensor of probability values for the Bernoulli distribution
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
- >>> a
- tensor([[ 0.1737, 0.0950, 0.3609],
- [ 0.7148, 0.0289, 0.2676],
- [ 0.9456, 0.8937, 0.7202]])
- >>> torch.bernoulli(a)
- tensor([[ 1., 0., 0.],
- [ 0., 0., 0.],
- [ 1., 1., 1.]])
-
- >>> a = torch.ones(3, 3) # probability of drawing "1" is 1
- >>> torch.bernoulli(a)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.],
- [ 1., 1., 1.]])
- >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
- >>> torch.bernoulli(a)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.],
- [ 0., 0., 0.]])
- """
- ...
- def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
- def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ...
- def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor:
- r"""
- bincount(input, weights=None, minlength=0) -> Tensor
-
- Count the frequency of each value in an array of non-negative ints.
-
- The number of bins (each of size 1) is one larger than the largest value in
- :attr:`input` unless :attr:`input` is empty, in which case the result is a
- tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
- :attr:`minlength` and if :attr:`input` is empty, then the result is a tensor of size
- :attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
- ``out[n] += weights[i]`` if :attr:`weights` is specified else
- ``out[n] += 1``.
-
- Note:
- This operation may produce nondeterministic gradients when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information.
-
- Arguments:
- input (Tensor): 1-d int tensor
- weights (Tensor): optional, weight for each value in the input tensor.
- Should be of same size as input tensor.
- minlength (int): optional, minimum number of bins. Should be non-negative.
-
- Returns:
- output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
- :attr:`input` is non-empty, else ``Size(0)``
-
- Example::
-
- >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
- >>> weights = torch.linspace(0, 1, steps=5)
- >>> input, weights
- (tensor([4, 3, 6, 3, 4]),
- tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
-
- >>> torch.bincount(input)
- tensor([0, 0, 0, 2, 2, 0, 1])
-
- >>> input.bincount(weights)
- tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
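-
- A hedged sketch of :attr:`minlength`, which only pads the result with
- trailing zero bins and never truncates it::
-
- >>> torch.bincount(torch.tensor([1, 1, 0]), minlength=4)
- tensor([1, 2, 0, 0])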
- """
- ...
- def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ...
- @overload
- def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_and(input, other, *, out=None) -> Tensor
-
- Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical AND.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([1, 0, 3], dtype=torch.int8)
- >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ False, True, False])
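-
- The :attr:`other` operand may also be a plain Python scalar (a hedged sketch,
- here masking out all but the lowest bit)::
-
- >>> torch.bitwise_and(torch.tensor([2, 3], dtype=torch.int8), 1)
- tensor([0, 1], dtype=torch.int8)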
- """
- ...
- @overload
- def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- bitwise_and(input, other, *, out=None) -> Tensor
-
- Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical AND.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([1, 0, 3], dtype=torch.int8)
- >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ False, True, False])
- """
- ...
- @overload
- def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_and(input, other, *, out=None) -> Tensor
-
- Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical AND.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([1, 0, 3], dtype=torch.int8)
- >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ False, True, False])
- """
- ...
- @overload
- def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_left_shift(input, other, *, out=None) -> Tensor
-
- Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{input}_i << \text{other}_i
-
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 24], dtype=torch.int8)
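-
- A scalar shift amount also broadcasts (a hedged sketch; shifting left by two
- multiplies by four)::
-
- >>> torch.bitwise_left_shift(torch.tensor([1, 2, 3]), 2)
- tensor([ 4, 8, 12])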
- """
- ...
- @overload
- def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- bitwise_left_shift(input, other, *, out=None) -> Tensor
-
- Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{input}_i << \text{other}_i
-
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 24], dtype=torch.int8)
- """
- ...
- @overload
- def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_left_shift(input, other, *, out=None) -> Tensor
-
- Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{input}_i << \text{other}_i
-
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 24], dtype=torch.int8)
- """
- ...
- def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_not(input, *, out=None) -> Tensor
-
- Computes the bitwise NOT of the given input tensor. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical NOT.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
- tensor([ 0, 1, -4], dtype=torch.int8)
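-
- On bool tensors the same call computes the logical NOT, as noted above
- (a small sketch)::
-
- >>> torch.bitwise_not(torch.tensor([True, False]))
- tensor([False, True])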
- """
- ...
- @overload
- def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_or(input, other, *, out=None) -> Tensor
-
- Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical OR.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -2, 3], dtype=torch.int8)
- >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, True, False])
- """
- ...
- @overload
- def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- bitwise_or(input, other, *, out=None) -> Tensor
-
- Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical OR.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -2, 3], dtype=torch.int8)
- >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, True, False])
- """
- ...
- @overload
- def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_or(input, other, *, out=None) -> Tensor
-
- Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical OR.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -2, 3], dtype=torch.int8)
- >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, True, False])
- """
- ...
- @overload
- def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_right_shift(input, other, *, out=None) -> Tensor
-
- Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
- If the value of the right operand is negative, or is greater than or equal
- to the number of bits in the promoted left operand, the behavior is undefined.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{input}_i >> \text{other}_i
-
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -7, 3], dtype=torch.int8)
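-
- The shift is arithmetic, so sign is preserved for negative values (a hedged
- sketch; ``-8 >> 2`` is ``-2``)::
-
- >>> torch.bitwise_right_shift(torch.tensor([-8, 8]), 2)
- tensor([-2, 2])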
- """
- ...
- @overload
- def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- bitwise_right_shift(input, other, *, out=None) -> Tensor
-
- Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
- If the value of the right operand is negative, or is greater than or equal
- to the number of bits in the promoted left operand, the behavior is undefined.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{input}_i >> \text{other}_i
-
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -7, 3], dtype=torch.int8)
- """
- ...
- @overload
- def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_right_shift(input, other, *, out=None) -> Tensor
-
- Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
- The input tensor must be of integral type. This operator supports
- :ref:`broadcasting to a common shape <broadcasting-semantics>` and
- :ref:`type promotion <type-promotion-doc>`.
- If the value of the right operand is negative, or is greater than or equal
- to the number of bits in the promoted left operand, the behavior is undefined.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{input}_i >> \text{other}_i
-
- Args:
- input (Tensor or Scalar): the first input tensor
- other (Tensor or Scalar): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-1, -7, 3], dtype=torch.int8)
- """
- ...
- @overload
- def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_xor(input, other, *, out=None) -> Tensor
-
- Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical XOR.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 0], dtype=torch.int8)
- >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, False, False])
- """
- ...
- @overload
- def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- bitwise_xor(input, other, *, out=None) -> Tensor
-
- Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical XOR.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 0], dtype=torch.int8)
- >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, False, False])
- """
- ...
- @overload
- def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bitwise_xor(input, other, *, out=None) -> Tensor
-
- Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
- integral or Boolean types. For bool tensors, it computes the logical XOR.
-
- Args:
- input: the first input tensor
- other: the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
- tensor([-2, -2, 0], dtype=torch.int8)
- >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
- tensor([ True, False, False])
- """
- ...
- @overload
- def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Blackman window function.
-
- .. math::
- w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
- above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.blackman_window(L, periodic=True)`` equal to
- ``torch.blackman_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
- """
- ...
- @overload
- def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Blackman window function.
-
- .. math::
- w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
- above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.blackman_window(L, periodic=True)`` equal to
- ``torch.blackman_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- Arguments:
- window_length (int): the size of returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window
- """
- ...
- def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bmm(input, mat2, *, out=None) -> Tensor
-
- Performs a batch matrix-matrix product of matrices stored in :attr:`input`
- and :attr:`mat2`.
-
- :attr:`input` and :attr:`mat2` must be 3-D tensors each containing
- the same number of matrices.
-
- If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
- :math:`(b \times m \times p)` tensor, :attr:`out` will be a
- :math:`(b \times n \times p)` tensor.
-
- .. math::
- \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
- For broadcasting matrix products, see :func:`torch.matmul`.
-
- Args:
- input (Tensor): the first batch of matrices to be multiplied
- mat2 (Tensor): the second batch of matrices to be multiplied
-
- Keyword Args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> input = torch.randn(10, 3, 4)
- >>> mat2 = torch.randn(10, 4, 5)
- >>> res = torch.bmm(input, mat2)
- >>> res.size()
- torch.Size([10, 3, 5])
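-
- As a hedged consistency check, each batch entry is an ordinary matrix
- product::
-
- >>> torch.allclose(res[0], input[0] @ mat2[0])
- True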
- """
- ...
- def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- broadcast_to(input, shape) -> Tensor
-
- Broadcasts :attr:`input` to the shape :attr:`shape`.
- Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
-
- Args:
- input (Tensor): the input tensor.
- shape (list, tuple, or :class:`torch.Size`): the new shape.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3])
- >>> torch.broadcast_to(x, (3, 3))
- tensor([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]])
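-
- Like ``expand``, the result is a view, so it reflects later in-place changes
- to :attr:`input` (a hedged sketch; avoid writing through the view itself)::
-
- >>> y = torch.broadcast_to(x, (3, 3))
- >>> x[0] = 9
- >>> y
- tensor([[9, 2, 3],
- [9, 2, 3],
- [9, 2, 3]])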
- """
- ...
- @overload
- def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
-
- Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
- boundaries of the buckets are set by :attr:`boundaries`. Returns a new tensor with the same size
- as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
- this behavior is opposite to the behavior of
- `numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.
- More formally, the returned index satisfies the following rules:
-
- .. list-table::
- :widths: 15 85
- :header-rows: 1
-
- * - :attr:`right`
- - *returned index satisfies*
- * - False
- - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
- * - True
- - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
-
- Args:
- input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
- boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
-
- Keyword args:
- out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
- Default value is False, i.e. default output data type is torch.int64.
- right (bool, optional): if False, returns the first suitable location that is found. If True, returns the
- last such index. If no suitable index is found, returns 0 for non-numerical values
- (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
- In other words, if False, gets the lower bound index for each value in :attr:`input`
- from :attr:`boundaries`. If True, gets the upper bound index instead.
- Default value is False.
- out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
-
-
- Example::
-
- >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
- >>> boundaries
- tensor([1, 3, 5, 7, 9])
- >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
- >>> v
- tensor([[3, 6, 9],
- [3, 6, 9]])
- >>> torch.bucketize(v, boundaries)
- tensor([[1, 3, 4],
- [1, 3, 4]])
- >>> torch.bucketize(v, boundaries, right=True)
- tensor([[2, 3, 5],
- [2, 3, 5]])
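-
- Values outside the boundary range follow the rules in the table above
- (a hedged sketch: below the first boundary gives 0, above the last gives
- ``len(boundaries)``)::
-
- >>> torch.bucketize(torch.tensor([0, 10]), boundaries)
- tensor([0, 5])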
- """
- ...
- @overload
- def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor:
- r"""
- bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
-
- Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
- boundaries of the buckets are set by :attr:`boundaries`. Returns a new tensor with the same size
- as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
- this behavior is opposite to the behavior of
- `numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.
- More formally, the returned index satisfies the following rules:
-
- .. list-table::
- :widths: 15 85
- :header-rows: 1
-
- * - :attr:`right`
- - *returned index satisfies*
- * - False
- - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
- * - True
- - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
-
- Args:
- input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
- boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
-
- Keyword args:
- out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
- Default value is False, i.e. default output data type is torch.int64.
- right (bool, optional): if False, return the first suitable location that is found. If True, return the
- last such index. If no suitable index is found, return 0 for non-numerical values
- (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
- In other words, if False, gets the lower bound index for each value in :attr:`input`
- from :attr:`boundaries`. If True, gets the upper bound index instead.
- Default value is False.
- out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
-
-
- Example::
-
- >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
- >>> boundaries
- tensor([1, 3, 5, 7, 9])
- >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
- >>> v
- tensor([[3, 6, 9],
- [3, 6, 9]])
- >>> torch.bucketize(v, boundaries)
- tensor([[1, 3, 4],
- [1, 3, 4]])
- >>> torch.bucketize(v, boundaries, right=True)
- tensor([[2, 3, 5],
- [2, 3, 5]])
- """
- ...
- def can_cast(from_: _dtype, to: _dtype) -> _bool:
- r"""
- can_cast(from_, to) -> bool
-
- Determines if a type conversion is allowed under PyTorch casting rules
- described in the type promotion :ref:`documentation <type-promotion-doc>`.
-
- Args:
- from\_ (dtype): The original :class:`torch.dtype`.
- to (dtype): The target :class:`torch.dtype`.
-
- Example::
-
- >>> torch.can_cast(torch.double, torch.float)
- True
- >>> torch.can_cast(torch.float, torch.int)
- False
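-
- A further illustrative check with complex dtypes (assuming the standard
- promotion lattice, where casting across kinds is only allowed toward the
- more general kind)::
-
- >>> torch.can_cast(torch.float, torch.complex64)
- True
- >>> torch.can_cast(torch.complex64, torch.float)
- False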
- """
- ...
- @overload
- def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cat(tensors, dim=0, *, out=None) -> Tensor
-
- Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
- All tensors must either have the same shape (except in the concatenating
- dimension) or be a 1-D empty tensor with size ``(0,)``.
-
- :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
- and :func:`torch.chunk`.
-
- :func:`torch.cat` can be best understood via examples.
-
- .. seealso::
-
- :func:`torch.stack` concatenates the given sequence along a new dimension.
-
- Args:
- tensors (sequence of Tensors): any python sequence of tensors of the same type.
- Non-empty tensors provided must have the same shape, except in the
- cat dimension.
- dim (int, optional): the dimension over which the tensors are concatenated
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497]])
- >>> torch.cat((x, x, x), 0)
- tensor([[ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497],
- [ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497],
- [ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497]])
- >>> torch.cat((x, x, x), 1)
- tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
- -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
- -0.5790, 0.1497]])
- """
- ...
- @overload
- def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cat(tensors, dim=0, *, out=None) -> Tensor
-
- Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
- All tensors must either have the same shape (except in the concatenating
- dimension) or be a 1-D empty tensor with size ``(0,)``.
-
- :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
- and :func:`torch.chunk`.
-
- :func:`torch.cat` can be best understood via examples.
-
- .. seealso::
-
- :func:`torch.stack` concatenates the given sequence along a new dimension.
-
- Args:
- tensors (sequence of Tensors): any python sequence of tensors of the same type.
- Non-empty tensors provided must have the same shape, except in the
- cat dimension.
- dim (int, optional): the dimension over which the tensors are concatenated
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497]])
- >>> torch.cat((x, x, x), 0)
- tensor([[ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497],
- [ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497],
- [ 0.6580, -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497]])
- >>> torch.cat((x, x, x), 1)
- tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
- -1.0969, -0.4614],
- [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
- -0.5790, 0.1497]])
- """
- ...
- def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ceil(input, *, out=None) -> Tensor
-
- Returns a new tensor with the ceil of the elements of :attr:`input`,
- the smallest integer greater than or equal to each element.
-
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
-
- .. math::
- \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.6341, -1.4208, -1.0900, 0.5826])
- >>> torch.ceil(a)
- tensor([-0., -1., -1., 1.])
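-
- An illustrative check of the integer-input note above; the input is
- returned as an unchanged copy::
-
- >>> torch.ceil(torch.tensor([1, 2, 3]))
- tensor([1, 2, 3])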
- """
- ...
- def ceil_(input: Tensor) -> Tensor: ...
- def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
- def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ...
- def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
- def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cholesky(input, upper=False, *, out=None) -> Tensor
-
- Computes the Cholesky decomposition of a symmetric positive-definite
- matrix :math:`A` or for batches of symmetric positive-definite matrices.
-
- If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
- the decomposition has the form:
-
- .. math::
-
- A = U^TU
-
- If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
- the decomposition has the form:
-
- .. math::
-
- A = LL^T
-
- If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
- matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
- of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
- tensor will be composed of lower-triangular Cholesky factors of each of the individual
- matrices.
-
- .. warning::
-
- :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
- and will be removed in a future PyTorch release.
-
- ``L = torch.cholesky(A)`` should be replaced with
-
- .. code:: python
-
- L = torch.linalg.cholesky(A)
-
- ``U = torch.cholesky(A, upper=True)`` should be replaced with
-
- .. code:: python
-
- U = torch.linalg.cholesky(A).mH
-
- This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
-
- Args:
- input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
- batch dimensions consisting of symmetric positive-definite matrices.
- upper (bool, optional): flag that indicates whether to return a
- upper or lower triangular matrix. Default: ``False``
-
- Keyword args:
- out (Tensor, optional): the output matrix
-
- Example::
-
- >>> a = torch.randn(3, 3)
- >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
- >>> l = torch.cholesky(a)
- >>> a
- tensor([[ 2.4112, -0.7486, 1.4551],
- [-0.7486, 1.3544, 0.1294],
- [ 1.4551, 0.1294, 1.6724]])
- >>> l
- tensor([[ 1.5528, 0.0000, 0.0000],
- [-0.4821, 1.0592, 0.0000],
- [ 0.9371, 0.5487, 0.7023]])
- >>> l @ l.mT
- tensor([[ 2.4112, -0.7486, 1.4551],
- [-0.7486, 1.3544, 0.1294],
- [ 1.4551, 0.1294, 1.6724]])
- >>> a = torch.randn(3, 2, 2) # Example for batched input
- >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
- >>> l = torch.cholesky(a)
- >>> z = l @ l.mT
- >>> torch.dist(z, a)
- tensor(2.3842e-07)
- """
- ...
- def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cholesky_inverse(L, upper=False, *, out=None) -> Tensor
-
- Computes the inverse of a complex Hermitian or real symmetric
- positive-definite matrix given its Cholesky decomposition.
-
- Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
- and :math:`L` its Cholesky decomposition such that:
-
- .. math::
-
- A = LL^{\text{H}}
-
- where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
- and the transpose when :math:`L` is real-valued.
-
- Computes the inverse matrix :math:`A^{-1}`.
-
- Supports input of float, double, cfloat and cdouble dtypes.
- Also supports batches of matrices, and if :math:`A` is a batch of matrices
- then the output has the same batch dimensions.
-
- Args:
- L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
- consisting of lower or upper triangular Cholesky decompositions of
- symmetric or Hermitian positive-definite matrices.
- upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
- or upper triangular. Default: ``False``
-
- Keyword args:
- out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
-
- Example::
-
- >>> A = torch.randn(3, 3)
- >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
- >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
- >>> torch.cholesky_inverse(L)
- tensor([[ 1.9314, 1.2251, -0.0889],
- [ 1.2251, 2.4439, 0.2122],
- [-0.0889, 0.2122, 0.1412]])
- >>> A.inverse()
- tensor([[ 1.9314, 1.2251, -0.0889],
- [ 1.2251, 2.4439, 0.2122],
- [-0.0889, 0.2122, 0.1412]])
-
- >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
- >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
- >>> L = torch.linalg.cholesky(A)
- >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L))
- tensor(5.6358e-7)
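-
- A minimal sketch of the ``upper=True`` path, assuming
- :func:`torch.linalg.cholesky` is called with its ``upper`` keyword so that
- ``A = U.mH @ U``::
-
- >>> A = torch.randn(3, 3)
- >>> A = A @ A.T + torch.eye(3) * 1e-3
- >>> U = torch.linalg.cholesky(A, upper=True)
- >>> torch.allclose(torch.cholesky_inverse(U, upper=True), A.inverse(), atol=1e-4)
- True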
- """
- ...
- def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cholesky_solve(B, L, upper=False, *, out=None) -> Tensor
-
- Computes the solution of a system of linear equations with complex Hermitian
- or real symmetric positive-definite lhs given its Cholesky decomposition.
-
- Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
- and :math:`L` its Cholesky decomposition such that:
-
- .. math::
-
- A = LL^{\text{H}}
-
- where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
- and the transpose when :math:`L` is real-valued.
-
- Returns the solution :math:`X` of the following linear system:
-
- .. math::
-
- AX = B
-
- Supports inputs of float, double, cfloat and cdouble dtypes.
- Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices
- then the output has the same batch dimensions.
-
- Args:
- B (Tensor): right-hand side tensor of shape `(*, n, k)`
- where :math:`*` is zero or more batch dimensions
- L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
- consisting of lower or upper triangular Cholesky decompositions of
- symmetric or Hermitian positive-definite matrices.
- upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
- or upper triangular. Default: ``False``.
-
- Keyword args:
- out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
-
- Example::
-
- >>> A = torch.randn(3, 3)
- >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
- >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
- >>> B = torch.randn(3, 2)
- >>> torch.cholesky_solve(B, L)
- tensor([[ -8.1625, 19.6097],
- [ -5.8398, 14.2387],
- [ -4.3771, 10.4173]])
- >>> A.inverse() @ B
- tensor([[ -8.1626, 19.6097],
- [ -5.8398, 14.2387],
- [ -4.3771, 10.4173]])
-
- >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
- >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
- >>> L = torch.linalg.cholesky(A)
- >>> B = torch.randn(2, 1, dtype=torch.complex64)
- >>> X = torch.cholesky_solve(B, L)
- >>> torch.dist(X, A.inverse() @ B)
- tensor(1.6881e-5)
- """
- ...
- def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ...
- def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- chunk(input, chunks, dim=0) -> List of Tensors
-
- Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
- the input tensor.
-
-
- .. note::
-
- This function may return fewer than the specified number of chunks!
-
- .. seealso::
-
- :func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks.
-
- If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
- all returned chunks will be the same size.
- If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
- all returned chunks will be the same size, except the last one.
- If such division is not possible, this function may return fewer
- than the specified number of chunks.
-
- Arguments:
- input (Tensor): the tensor to split
- chunks (int): number of chunks to return
- dim (int): dimension along which to split the tensor
-
- Example::
-
- >>> torch.arange(11).chunk(6)
- (tensor([0, 1]),
- tensor([2, 3]),
- tensor([4, 5]),
- tensor([6, 7]),
- tensor([8, 9]),
- tensor([10]))
- >>> torch.arange(12).chunk(6)
- (tensor([0, 1]),
- tensor([2, 3]),
- tensor([4, 5]),
- tensor([6, 7]),
- tensor([8, 9]),
- tensor([10, 11]))
- >>> torch.arange(13).chunk(6)
- (tensor([0, 1, 2]),
- tensor([3, 4, 5]),
- tensor([6, 7, 8]),
- tensor([ 9, 10, 11]),
- tensor([12]))
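-
- For comparison, an illustrative call to :func:`torch.tensor_split` on the
- same input, which returns exactly the six requested chunks:
-
- >>> torch.arange(13).tensor_split(6)
- (tensor([0, 1, 2]),
- tensor([3, 4]),
- tensor([5, 6]),
- tensor([7, 8]),
- tensor([ 9, 10]),
- tensor([11, 12]))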
- """
- ...
- @overload
- def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- clamp(input, min=None, max=None, *, out=None) -> Tensor
-
- Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
- Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
-
- .. math::
- y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
-
- If :attr:`min` is ``None``, there is no lower bound, and if :attr:`max` is ``None``,
- there is no upper bound.
-
-
- .. note::
- If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
- sets all elements in :attr:`input` to the value of :attr:`max`.
-
- Args:
- input (Tensor): the input tensor.
- min (Number or Tensor, optional): lower-bound of the range to be clamped to
- max (Number or Tensor, optional): upper-bound of the range to be clamped to
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-1.7120, 0.1734, -0.0478, -0.0922])
- >>> torch.clamp(a, min=-0.5, max=0.5)
- tensor([-0.5000, 0.1734, -0.0478, -0.0922])
-
- >>> min = torch.linspace(-1, 1, steps=4)
- >>> torch.clamp(a, min=min)
- tensor([-1.0000, 0.1734, 0.3333, 1.0000])
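-
- An illustrative check of the note above: when :attr:`min` exceeds
- :attr:`max`, every element is set to the value of :attr:`max`:
-
- >>> torch.clamp(torch.tensor([1., 5., 10.]), min=6, max=2)
- tensor([2., 2., 2.])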
- """
- ...
- @overload
- def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- clamp(input, min=None, max=None, *, out=None) -> Tensor
-
- Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
- Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
-
- .. math::
- y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
-
- If :attr:`min` is ``None``, there is no lower bound, and if :attr:`max` is ``None``,
- there is no upper bound.
-
-
- .. note::
- If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
- sets all elements in :attr:`input` to the value of :attr:`max`.
-
- Args:
- input (Tensor): the input tensor.
- min (Number or Tensor, optional): lower-bound of the range to be clamped to
- max (Number or Tensor, optional): upper-bound of the range to be clamped to
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-1.7120, 0.1734, -0.0478, -0.0922])
- >>> torch.clamp(a, min=-0.5, max=0.5)
- tensor([-0.5000, 0.1734, -0.0478, -0.0922])
-
- >>> min = torch.linspace(-1, 1, steps=4)
- >>> torch.clamp(a, min=min)
- tensor([-1.0000, 0.1734, 0.3333, 1.0000])
- """
- ...
- @overload
- def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
- @overload
- def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ...
- @overload
- def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ...
- @overload
- def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ...
- @overload
- def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ...
- @overload
- def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- clip(input, min=None, max=None, *, out=None) -> Tensor
-
- Alias for :func:`torch.clamp`.
- """
- ...
- @overload
- def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- clip(input, min=None, max=None, *, out=None) -> Tensor
-
- Alias for :func:`torch.clamp`.
- """
- ...
- @overload
- def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ...
- def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor:
- r"""
- clone(input, *, memory_format=torch.preserve_format) -> Tensor
-
- Returns a copy of :attr:`input`.
-
- .. note::
-
- This function is differentiable, so gradients will flow back from the
- result of this operation to :attr:`input`. To create a tensor without an
- autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned tensor. Default: ``torch.preserve_format``.
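-
- Example (illustrative; gradients flow back through the clone, per the
- note above)::
-
- >>> x = torch.tensor([1., 2.], requires_grad=True)
- >>> y = x.clone()
- >>> y.sum().backward()
- >>> x.grad
- tensor([1., 1.])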
- """
- ...
- def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.col_indices`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- column_stack(tensors, *, out=None) -> Tensor
-
- Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
-
- Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
- in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
-
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.column_stack((a, b))
- tensor([[1, 4],
- [2, 5],
- [3, 6]])
- >>> a = torch.arange(5)
- >>> b = torch.arange(10).reshape(5, 2)
- >>> torch.column_stack((a, b, b))
- tensor([[0, 0, 1, 0, 1],
- [1, 2, 3, 2, 3],
- [2, 4, 5, 4, 5],
- [3, 6, 7, 6, 7],
- [4, 8, 9, 8, 9]])
- """
- ...
- def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor:
- r"""
- combinations(input, r=2, with_replacement=False) -> seq
-
- Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
- python's `itertools.combinations` when `with_replacement` is set to `False`, and
- `itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
-
- Arguments:
- input (Tensor): 1D vector.
- r (int, optional): number of elements to combine
- with_replacement (bool, optional): whether to allow duplication in combination
-
- Returns:
- Tensor: A tensor equivalent to converting the input tensor into a list, applying
- `itertools.combinations` or `itertools.combinations_with_replacement` to that
- list, and converting the resulting list back into a tensor.
-
- Example::
-
- >>> import itertools
- >>> a = [1, 2, 3]
- >>> list(itertools.combinations(a, r=2))
- [(1, 2), (1, 3), (2, 3)]
- >>> list(itertools.combinations(a, r=3))
- [(1, 2, 3)]
- >>> list(itertools.combinations_with_replacement(a, r=2))
- [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
- >>> tensor_a = torch.tensor(a)
- >>> torch.combinations(tensor_a)
- tensor([[1, 2],
- [1, 3],
- [2, 3]])
- >>> torch.combinations(tensor_a, r=3)
- tensor([[1, 2, 3]])
- >>> torch.combinations(tensor_a, with_replacement=True)
- tensor([[1, 1],
- [1, 2],
- [1, 3],
- [2, 2],
- [2, 3],
- [3, 3]])
- """
- ...
- def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- complex(real, imag, *, out=None) -> Tensor
-
- Constructs a complex tensor with its real part equal to :attr:`real` and its
- imaginary part equal to :attr:`imag`.
-
- Args:
- real (Tensor): The real part of the complex tensor. Must be half, float or double.
- imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
- as :attr:`real`.
-
- Keyword args:
- out (Tensor): If the inputs are ``torch.float32``, must be
- ``torch.complex64``. If the inputs are ``torch.float64``, must be
- ``torch.complex128``.
-
- Example::
-
- >>> real = torch.tensor([1, 2], dtype=torch.float32)
- >>> imag = torch.tensor([3, 4], dtype=torch.float32)
- >>> z = torch.complex(real, imag)
- >>> z
- tensor([(1.+3.j), (2.+4.j)])
- >>> z.dtype
- torch.complex64
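-
- An illustrative check of the dtype rule above: double-precision inputs
- produce a ``torch.complex128`` result::
-
- >>> real = torch.tensor([1.], dtype=torch.float64)
- >>> imag = torch.tensor([2.], dtype=torch.float64)
- >>> torch.complex(real, imag).dtype
- torch.complex128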
- """
- ...
- @overload
- def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- concat(tensors, dim=0, *, out=None) -> Tensor
-
- Alias of :func:`torch.cat`.
- """
- ...
- @overload
- def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- concat(tensors, dim=0, *, out=None) -> Tensor
-
- Alias of :func:`torch.cat`.
- """
- ...
- @overload
- def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- concatenate(tensors, axis=0, out=None) -> Tensor
-
- Alias of :func:`torch.cat`.
- """
- ...
- @overload
- def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- concatenate(tensors, axis=0, out=None) -> Tensor
-
- Alias of :func:`torch.cat`.
- """
- ...
- def conj(input: Tensor) -> Tensor:
- r"""
- conj(input) -> Tensor
-
- Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
- this function just returns :attr:`input`.
-
- .. note::
- :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
- at any time using :func:`torch.resolve_conj`.
-
- .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
- non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
- when :attr:`input` is of non-complex dtype to be compatible with this change.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
- >>> x.is_conj()
- False
- >>> y = torch.conj(x)
- >>> y.is_conj()
- True
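-
- The lazy conjugation can be materialized as described in the note above:
-
- >>> z = torch.resolve_conj(y)
- >>> z.is_conj()
- False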
- """
- ...
- def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- conj_physical(input, *, out=None) -> Tensor
-
- Computes the element-wise conjugate of the given :attr:`input` tensor.
- If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
-
- .. note::
- This performs the conjugate operation regardless of whether the conjugate bit is set.
-
- .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
- non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
- when :attr:`input` is of non-complex dtype to be compatible with this change.
-
- .. math::
- \text{out}_{i} = conj(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
- tensor([-1 - 1j, -2 - 2j, 3 + 3j])
- """
- ...
- def conj_physical_(input: Tensor) -> Tensor: ...
- def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ...
- @overload
- def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
- @overload
- def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
- @overload
- def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
- @overload
- def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
- @overload
- def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
- @overload
- def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ...
- def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ...
- def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
- def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
- def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ...
- def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- @overload
- def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- copysign(input, other, *, out=None) -> Tensor
-
- Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
-
- .. math::
- \text{out}_{i} = \begin{cases}
- -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
- |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
- \end{cases}
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- and integer and float inputs.
-
- Args:
- input (Tensor): magnitudes.
- other (Tensor or Number): contains value(s) whose signbit(s) are
- applied to the magnitudes in :attr:`input`.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(5)
- >>> a
- tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
- >>> torch.copysign(a, 1)
- tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
- [-0.0059, -0.2600, -0.4475, -1.3948],
- [ 0.3667, -0.9567, -2.5757, -0.1751],
- [ 0.2046, -0.0742, 0.2998, -0.1054]])
- >>> b = torch.randn(4)
- >>> b
- tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
- >>> torch.copysign(a, b)
- tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
- [ 0.0059, 0.2600, 0.4475, -1.3948],
- [ 0.3667, 0.9567, 2.5757, -0.1751],
- [ 0.2046, 0.0742, 0.2998, -0.1054]])
- >>> a = torch.tensor([1.])
- >>> b = torch.tensor([-0.])
- >>> torch.copysign(a, b)
- tensor([-1.])
-
- .. note::
- copysign handles signed zeros. If the other argument has a negative zero (-0),
- the corresponding output value will be negative.
- """
- ...
- @overload
- def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- copysign(input, other, *, out=None) -> Tensor
-
- Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
-
- .. math::
- \text{out}_{i} = \begin{cases}
- -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
- |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
- \end{cases}
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- and integer and float inputs.
-
- Args:
- input (Tensor): magnitudes.
- other (Tensor or Number): contains value(s) whose signbit(s) are
- applied to the magnitudes in :attr:`input`.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(5)
- >>> a
- tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
- >>> torch.copysign(a, 1)
- tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
- [-0.0059, -0.2600, -0.4475, -1.3948],
- [ 0.3667, -0.9567, -2.5757, -0.1751],
- [ 0.2046, -0.0742, 0.2998, -0.1054]])
- >>> b = torch.randn(4)
- >>> b
- tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
- >>> torch.copysign(a, b)
- tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
- [ 0.0059, 0.2600, 0.4475, -1.3948],
- [ 0.3667, 0.9567, 2.5757, -0.1751],
- [ 0.2046, 0.0742, 0.2998, -0.1054]])
- >>> a = torch.tensor([1.])
- >>> b = torch.tensor([-0.])
- >>> torch.copysign(a, b)
- tensor([-1.])
-
- .. note::
- copysign handles signed zeros. If the other argument has a negative zero (-0),
- the corresponding output value will be negative.
- """
- ...
- def corrcoef(input: Tensor) -> Tensor:
- r"""
- corrcoef(input) -> Tensor
-
- Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
- where rows are the variables and columns are the observations.
-
- .. note::
-
- The correlation coefficient matrix R is computed using the covariance matrix C as given by
- :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
-
- .. note::
-
- Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
- The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
-
- Args:
- input (Tensor): A 2D matrix containing multiple variables and observations, or a
- Scalar or 1D vector representing a single variable.
-
- Returns:
- (Tensor) The correlation coefficient matrix of the variables.
-
- .. seealso::
-
- :func:`torch.cov` covariance matrix.
-
- Example::
-
- >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
- >>> torch.corrcoef(x)
- tensor([[ 1., -1.],
- [-1., 1.]])
- >>> x = torch.randn(2, 4)
- >>> x
- tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
- [-0.5812, 0.1535, 0.2387, 0.2350]])
- >>> torch.corrcoef(x)
- tensor([[1.0000, 0.3582],
- [0.3582, 1.0000]])
- >>> torch.corrcoef(x[0])
- tensor(1.)
- """
- ...
- def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cos(input, *, out=None) -> Tensor
-
- Returns a new tensor with the cosine of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \cos(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
- >>> torch.cos(a)
- tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
- """
- ...
- def cos_(input: Tensor) -> Tensor: ...
- def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cosh(input, *, out=None) -> Tensor
-
- Returns a new tensor with the hyperbolic cosine of the elements of
- :attr:`input`.
-
- .. math::
- \text{out}_{i} = \cosh(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
- >>> torch.cosh(a)
- tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
-
- .. note::
- When :attr:`input` is on the CPU, the implementation of torch.cosh may use
- the Sleef library, which rounds very large results to infinity or negative
- infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
- """
- ...
- def cosh_(input: Tensor) -> Tensor: ...
- def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
- def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ...
- @overload
- def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor:
- r"""
- count_nonzero(input, dim=None) -> Tensor
-
- Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
- If no dim is specified then all non-zeros in the tensor are counted.
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
-
- Example::
-
- >>> x = torch.zeros(3,3)
- >>> x[torch.randn(3,3) > 0.5] = 1
- >>> x
- tensor([[0., 1., 1.],
- [0., 0., 0.],
- [0., 0., 1.]])
- >>> torch.count_nonzero(x)
- tensor(3)
- >>> torch.count_nonzero(x, dim=0)
- tensor([0, 1, 2])
- """
- ...
- @overload
- def count_nonzero(input: Tensor, dim: _size) -> Tensor:
- r"""
- count_nonzero(input, dim=None) -> Tensor
-
- Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
- If no dim is specified then all non-zeros in the tensor are counted.
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
-
- Example::
-
- >>> x = torch.zeros(3,3)
- >>> x[torch.randn(3,3) > 0.5] = 1
- >>> x
- tensor([[0., 1., 1.],
- [0., 0., 0.],
- [0., 0., 1.]])
- >>> torch.count_nonzero(x)
- tensor(3)
- >>> torch.count_nonzero(x, dim=0)
- tensor([0, 1, 2])
- """
- ...
- def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor:
- r"""
- cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
-
- Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
- the variables and columns are the observations.
-
- A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
- the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
- a single variable (Scalar or 1D) then its variance is returned.
-
- The sample covariance of the variables :math:`x` and :math:`y` is given by:
-
- .. math::
- \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}
-
- where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and
- :math:`\delta N` is the :attr:`correction`.
-
- If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
- is calculated, which is given by:
-
- .. math::
- \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
- {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}
-
- where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
- provided, or :math:`w = f \times a` if both are provided, and
- :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
- provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.
-
- Args:
- input (Tensor): A 2D matrix containing multiple variables and observations, or a
- Scalar or 1D vector representing a single variable.
-
- Keyword Args:
- correction (int, optional): difference between the sample size and sample degrees of freedom.
- Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate,
- even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
- will return the simple average.
- fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
- times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
- Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
- aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
- These relative weights are typically large for observations considered "important" and smaller for
- observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
- Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
-
- Returns:
- (Tensor) The covariance matrix of the variables.
-
- .. seealso::
-
- :func:`torch.corrcoef` normalized covariance matrix.
-
- Example::
-
- >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
- >>> x
- tensor([[0, 1, 2],
- [2, 1, 0]])
- >>> torch.cov(x)
- tensor([[ 1., -1.],
- [-1., 1.]])
- >>> torch.cov(x, correction=0)
- tensor([[ 0.6667, -0.6667],
- [-0.6667, 0.6667]])
- >>> fw = torch.randint(1, 10, (3,))
- >>> fw
- tensor([1, 6, 9])
- >>> aw = torch.rand(3)
- >>> aw
- tensor([0.4282, 0.0255, 0.4144])
- >>> torch.cov(x, fweights=fw, aweights=aw)
- tensor([[ 0.4169, -0.4169],
- [-0.4169, 0.4169]])
- """
- ...
- def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cross(input, other, dim=None, *, out=None) -> Tensor
-
-
- Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
- and :attr:`other`.
-
- Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
- of vectors, for which it computes the product along the dimension :attr:`dim`.
- In this case, the output has the same batch dimensions as the inputs.
-
- .. warning::
- If :attr:`dim` is not given, it defaults to the first dimension found
- with the size 3. Note that this might be unexpected.
-
- This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross`
- in a future release.
-
- .. seealso::
- :func:`torch.linalg.cross` which has dim=-1 as default.
-
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
- dim (int, optional): the dimension to take the cross-product in.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4, 3)
- >>> a
- tensor([[-0.3956, 1.1455, 1.6895],
- [-0.5849, 1.3672, 0.3599],
- [-1.1626, 0.7180, -0.0521],
- [-0.1339, 0.9902, -2.0225]])
- >>> b = torch.randn(4, 3)
- >>> b
- tensor([[-0.0257, -1.4725, -1.2251],
- [-1.1479, -0.7005, -1.9757],
- [-1.3904, 0.3726, -1.1836],
- [-0.9688, -0.7153, 0.2159]])
- >>> torch.cross(a, b, dim=1)
- tensor([[ 1.0844, -0.5281, 0.6120],
- [-2.4490, -1.5687, 1.9792],
- [-0.8304, -1.3037, 0.5650],
- [-1.2329, 1.9883, 1.0551]])
- >>> torch.cross(a, b)
- tensor([[ 1.0844, -0.5281, 0.6120],
- [-2.4490, -1.5687, 1.9792],
- [-0.8304, -1.3037, 0.5650],
- [-1.2329, 1.9883, 1.0551]])
- """
- ...
- def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.crow_indices`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
- @overload
- def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ...
- def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ...
- def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool, *, out: Optional[Tensor] = None) -> Tensor: ...
- def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
- def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
- def cudnn_is_acceptable(input: Tensor) -> _bool: ...
- @overload
- def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
- r"""
- cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
- elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
- location of each maximum value found in the dimension :attr:`dim`.
-
- .. math::
- y_i = \max(x_1, x_2, x_3, \dots, x_i)
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
-
- Example::
-
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
- 1.9946, -0.8209])
- >>> torch.cummax(a, dim=0)
- torch.return_types.cummax(
- values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
- 1.9946, 1.9946]),
- indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
- """
- ...
- @overload
- def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
- r"""
- cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
- elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
- location of each maximum value found in the dimension :attr:`dim`.
-
- .. math::
- y_i = \max(x_1, x_2, x_3, \dots, x_i)
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
-
- Example::
-
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
- 1.9946, -0.8209])
- >>> torch.cummax(a, dim=0)
- torch.return_types.cummax(
- values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
- 1.9946, 1.9946]),
- indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
- """
- ...
- @overload
- def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
- r"""
- cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
- elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
- location of each minimum value found in the dimension :attr:`dim`.
-
- .. math::
- y_i = \min(x_1, x_2, x_3, \dots, x_i)
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
-
- Example::
-
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
- 0.9165, 1.6684])
- >>> torch.cummin(a, dim=0)
- torch.return_types.cummin(
- values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
- -1.3298, -1.3298]),
- indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
- """
- ...
- @overload
- def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
- r"""
- cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
- elements of :attr:`input` in the dimension :attr:`dim`, and ``indices`` is the index
- location of each minimum value found in the dimension :attr:`dim`.
-
- .. math::
- y_i = \min(x_1, x_2, x_3, \dots, x_i)
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
-
- Example::
-
- >>> a = torch.randn(10)
- >>> a
- tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
- 0.9165, 1.6684])
- >>> torch.cummin(a, dim=0)
- torch.return_types.cummin(
- values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
- -1.3298, -1.3298]),
- indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
- """
- ...
- @overload
- def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cumprod(input, dim, *, dtype=None, out=None) -> Tensor
-
- Returns the cumulative product of elements of :attr:`input` in the dimension
- :attr:`dim`.
-
- For example, if :attr:`input` is a vector of size N, the result will also be
- a vector of size N, with elements.
-
- .. math::
- y_i = x_1 \times x_2\times x_3\times \dots \times x_i
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(10)
- >>> a
- tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
- -0.2129, -0.4206, 0.1968])
- >>> torch.cumprod(a, dim=0)
- tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
- 0.0014, -0.0006, -0.0001])
-
- >>> a[5] = 0.0
- >>> torch.cumprod(a, dim=0)
- tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
- 0.0000, -0.0000, -0.0000])
- """
- ...
- @overload
- def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cumprod(input, dim, *, dtype=None, out=None) -> Tensor
-
- Returns the cumulative product of elements of :attr:`input` in the dimension
- :attr:`dim`.
-
- For example, if :attr:`input` is a vector of size N, the result will also be
- a vector of size N, with elements.
-
- .. math::
- y_i = x_1 \times x_2\times x_3\times \dots \times x_i
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(10)
- >>> a
- tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
- -0.2129, -0.4206, 0.1968])
- >>> torch.cumprod(a, dim=0)
- tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
- 0.0014, -0.0006, -0.0001])
-
- >>> a[5] = 0.0
- >>> torch.cumprod(a, dim=0)
- tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
- 0.0000, -0.0000, -0.0000])
- """
- ...
- @overload
- def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cumsum(input, dim, *, dtype=None, out=None) -> Tensor
-
- Returns the cumulative sum of elements of :attr:`input` in the dimension
- :attr:`dim`.
-
- For example, if :attr:`input` is a vector of size N, the result will also be
- a vector of size N, with elements.
-
- .. math::
- y_i = x_1 + x_2 + x_3 + \dots + x_i
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randint(1, 20, (10,))
- >>> a
- tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
- >>> torch.cumsum(a, dim=0)
- tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
- """
- ...
- @overload
- def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- cumsum(input, dim, *, dtype=None, out=None) -> Tensor
-
- Returns the cumulative sum of elements of :attr:`input` in the dimension
- :attr:`dim`.
-
- For example, if :attr:`input` is a vector of size N, the result will also be
- a vector of size N, with elements.
-
- .. math::
- y_i = x_1 + x_2 + x_3 + \dots + x_i
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randint(1, 20, (10,))
- >>> a
- tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10])
- >>> torch.cumsum(a, dim=0)
- tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
- """
- ...
- @overload
- def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
- r"""
- cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
-
- Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
- along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
- :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
- used to specify arbitrary spacing along :attr:`dim`.
-
- For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
- and this function is that :func:`torch.trapezoid` returns a value for each integration,
- whereas this function returns a cumulative value for every spacing within the integration. This
- is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
-
- Arguments:
- y (Tensor): Values to use when computing the trapezoidal rule.
- x (Tensor): If specified, defines spacing between values as specified above.
-
- Keyword arguments:
- dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
- is specified, this defaults to 1. Effectively multiplies the result by its value.
- dim (int): The dimension along which to compute the trapezoidal rule.
- The last (inner-most) dimension by default.
-
- Examples::
-
- >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
- >>> y = torch.tensor([1, 5, 10])
- >>> torch.cumulative_trapezoid(y)
- tensor([3., 10.5])
-
- >>> # Computes the same trapezoidal rule directly up to each element to verify
- >>> (1 + 5) / 2
- 3.0
- >>> (1 + 10 + 10) / 2
- 10.5
-
- >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
- >>> # NOTE: the result is the same as before, but multiplied by 2
- >>> torch.cumulative_trapezoid(y, dx=2)
- tensor([6., 21.])
-
- >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([6., 28.5])
-
- >>> # Computes the same trapezoidal rule directly up to each element to verify
- >>> ((3 - 1) * (1 + 5)) / 2
- 6.0
- >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
- 28.5
-
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
- >>> y = torch.arange(9).reshape(3, 3)
- >>> y
- tensor([[0, 1, 2],
- [3, 4, 5],
- [6, 7, 8]])
- >>> torch.cumulative_trapezoid(y)
- tensor([[ 0.5, 2.],
- [ 3.5, 8.],
- [ 6.5, 14.]])
-
- >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
- >>> torch.cumulative_trapezoid(y, dim=0)
- tensor([[ 1.5, 2.5, 3.5],
- [ 6.0, 8.0, 10.0]])
-
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with the same arbitrary spacing
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([[2., 5.],
- [2., 5.],
- [2., 5.]])
-
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with different arbitrary spacing per row
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([[1., 2.],
- [2., 4.],
- [3., 6.]])
- """
- ...
- @overload
- def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor:
- r"""
- cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
-
- Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
- along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
- :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
- used to specify arbitrary spacing along :attr:`dim`.
-
- For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
- and this function is that :func:`torch.trapezoid` returns a value for each integration,
- whereas this function returns a cumulative value for every spacing within the integration. This
- is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
-
- Arguments:
- y (Tensor): Values to use when computing the trapezoidal rule.
- x (Tensor): If specified, defines spacing between values as specified above.
-
- Keyword arguments:
- dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
- is specified then this defaults to 1. Effectively multiplies the result by its value.
- dim (int): The dimension along which to compute the trapezoidal rule.
- The last (inner-most) dimension by default.
-
- Examples::
-
- >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
- >>> y = torch.tensor([1, 5, 10])
- >>> torch.cumulative_trapezoid(y)
- tensor([3., 10.5])
-
- >>> # Computes the same trapezoidal rule directly up to each element to verify
- >>> (1 + 5) / 2
- 3.0
- >>> (1 + 10 + 10) / 2
- 10.5
-
- >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
- >>> # NOTE: the result is the same as before, but multiplied by 2
- >>> torch.cumulative_trapezoid(y, dx=2)
- tensor([6., 21.])
-
- >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([6., 28.5])
-
- >>> # Computes the same trapezoidal rule directly up to each element to verify
- >>> ((3 - 1) * (1 + 5)) / 2
- 6.0
- >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
- 28.5
-
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
- >>> y = torch.arange(9).reshape(3, 3)
- >>> y
- tensor([[0, 1, 2],
- [3, 4, 5],
- [6, 7, 8]])
- >>> torch.cumulative_trapezoid(y)
- tensor([[ 0.5, 2.],
- [ 3.5, 8.],
- [ 6.5, 14.]])
-
- >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
- >>> torch.cumulative_trapezoid(y, dim=0)
- tensor([[ 1.5, 2.5, 3.5],
- [ 6.0, 8.0, 10.0]])
-
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with the same arbitrary spacing
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([[2., 5.],
- [2., 5.],
- [2., 5.]])
-
- >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with different arbitrary spacing per row
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
- >>> torch.cumulative_trapezoid(y, x)
- tensor([[1., 2.],
- [2., 4.],
- [3., 6.]])
- """
- ...
- def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- deg2rad(input, *, out=None) -> Tensor
-
- Returns a new tensor with each of the elements of :attr:`input`
- converted from angles in degrees to radians.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
- >>> torch.deg2rad(a)
- tensor([[ 3.1416, -3.1416],
- [ 6.2832, -6.2832],
- [ 1.5708, -1.5708]])
- """
- ...
- def deg2rad_(input: Tensor) -> Tensor: ...
- @overload
- def dequantize(input: Tensor) -> Tensor:
- r"""
- dequantize(tensor) -> Tensor
-
- Returns an fp32 Tensor by dequantizing a quantized Tensor
-
- Args:
- tensor (Tensor): A quantized Tensor
-
- .. function:: dequantize(tensors) -> sequence of Tensors
- :noindex:
-
- Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
-
- Args:
- tensors (sequence of Tensors): A list of quantized Tensors
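-
- Example (an illustrative sketch exercising the single-Tensor variant; the
- quantization parameters here are chosen by hand, not taken from upstream docs)::
-
- >>> x = torch.tensor([0.0, 0.5, 1.0])
- >>> q = torch.quantize_per_tensor(x, scale=0.5, zero_point=0, dtype=torch.quint8)
- >>> torch.dequantize(q)
- tensor([0.0000, 0.5000, 1.0000])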
- """
- ...
- @overload
- def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]:
- r"""
- dequantize(tensor) -> Tensor
-
- Returns an fp32 Tensor by dequantizing a quantized Tensor
-
- Args:
- tensor (Tensor): A quantized Tensor
-
- .. function:: dequantize(tensors) -> sequence of Tensors
- :noindex:
-
- Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
-
- Args:
- tensors (sequence of Tensors): A list of quantized Tensors
- """
- ...
- def det(input: Tensor) -> Tensor:
- r"""
- det(input) -> Tensor
-
- Alias for :func:`torch.linalg.det`
- """
- ...
- def detach(input: Tensor) -> Tensor: ...
- def detach_(input: Tensor) -> Tensor: ...
- def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.detach`, but all output tensors
- are freshly created instead of aliasing the input.
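-
- Example (a minimal sketch; the behavior shown follows from the description above)::
-
- >>> x = torch.tensor([1., 2.], requires_grad=True)
- >>> y = torch.detach_copy(x)
- >>> y.requires_grad, y.data_ptr() == x.data_ptr()
- (False, False)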
- """
- ...
- def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- diag(input, diagonal=0, *, out=None) -> Tensor
-
- - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
- with the elements of :attr:`input` as the diagonal.
- - If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
- the diagonal elements of :attr:`input`.
-
- The argument :attr:`diagonal` controls which diagonal to consider:
-
- - If :attr:`diagonal` = 0, it is the main diagonal.
- - If :attr:`diagonal` > 0, it is above the main diagonal.
- - If :attr:`diagonal` < 0, it is below the main diagonal.
-
- Args:
- input (Tensor): the input tensor.
- diagonal (int, optional): the diagonal to consider
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- .. seealso::
-
- :func:`torch.diagonal` always returns the diagonal of its input.
-
- :func:`torch.diagflat` always constructs a tensor with diagonal elements
- specified by the input.
-
- Examples:
-
- Get the square matrix where the input vector is the diagonal::
-
- >>> a = torch.randn(3)
- >>> a
- tensor([ 0.5950,-0.0872, 2.3298])
- >>> torch.diag(a)
- tensor([[ 0.5950, 0.0000, 0.0000],
- [ 0.0000,-0.0872, 0.0000],
- [ 0.0000, 0.0000, 2.3298]])
- >>> torch.diag(a, 1)
- tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
- [ 0.0000, 0.0000,-0.0872, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 2.3298],
- [ 0.0000, 0.0000, 0.0000, 0.0000]])
-
- Get the k-th diagonal of a given matrix::
-
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-0.4264, 0.0255,-0.1064],
- [ 0.8795,-0.2429, 0.1374],
- [ 0.1029,-0.6482,-1.6300]])
- >>> torch.diag(a, 0)
- tensor([-0.4264,-0.2429,-1.6300])
- >>> torch.diag(a, 1)
- tensor([ 0.0255, 0.1374])
- """
- ...
- def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor:
- r"""
- diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
-
- Creates a tensor whose diagonals of certain 2D planes (specified by
- :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
- To facilitate creating batched diagonal matrices, the 2D planes formed by
- the last two dimensions of the returned tensor are chosen by default.
-
- The argument :attr:`offset` controls which diagonal to consider:
-
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
-
- The size of the new matrix is computed so that the specified diagonal
- has the length of the last dimension of :attr:`input`.
- Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
- and :attr:`dim2` matters. Exchanging them is equivalent to changing the
- sign of :attr:`offset`.
-
- Applying :meth:`torch.diagonal` to the output of this function with
- the same arguments yields a matrix identical to input. However,
- :meth:`torch.diagonal` has different default dimensions, so those
- need to be explicitly specified.
-
- Args:
- input (Tensor): the input tensor. Must be at least 1-dimensional.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: -2.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: -1.
-
- Example::
-
- >>> a = torch.randn(2, 3)
- >>> torch.diag_embed(a)
- tensor([[[ 1.5410, 0.0000, 0.0000],
- [ 0.0000, -0.2934, 0.0000],
- [ 0.0000, 0.0000, -2.1788]],
-
- [[ 0.5684, 0.0000, 0.0000],
- [ 0.0000, -1.0845, 0.0000],
- [ 0.0000, 0.0000, -1.3986]]])
-
- >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
- tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
- [ 0.0000, 0.5684, 0.0000, 0.0000]],
-
- [[ 0.0000, 0.0000, -0.2934, 0.0000],
- [ 0.0000, 0.0000, -1.0845, 0.0000]],
-
- [[ 0.0000, 0.0000, 0.0000, -2.1788],
- [ 0.0000, 0.0000, 0.0000, -1.3986]],
-
- [[ 0.0000, 0.0000, 0.0000, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 0.0000]]])
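-
- >>> # Illustrative round trip (not from the upstream docs): torch.diagonal
- >>> # with dim1=-2, dim2=-1 recovers the input, as noted above
- >>> b = torch.tensor([1., 2., 3.])
- >>> torch.diagonal(torch.diag_embed(b), dim1=-2, dim2=-1)
- tensor([1., 2., 3.])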
- """
- ...
- def diagflat(input: Tensor, offset: _int = 0) -> Tensor:
- r"""
- diagflat(input, offset=0) -> Tensor
-
- - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
- with the elements of :attr:`input` as the diagonal.
- - If :attr:`input` is a tensor with more than one dimension, then returns a
- 2-D tensor with diagonal elements equal to a flattened :attr:`input`.
-
- The argument :attr:`offset` controls which diagonal to consider:
-
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
-
- Args:
- input (Tensor): the input tensor.
- offset (int, optional): the diagonal to consider. Default: 0 (main
- diagonal).
-
- Examples::
-
- >>> a = torch.randn(3)
- >>> a
- tensor([-0.2956, -0.9068, 0.1695])
- >>> torch.diagflat(a)
- tensor([[-0.2956, 0.0000, 0.0000],
- [ 0.0000, -0.9068, 0.0000],
- [ 0.0000, 0.0000, 0.1695]])
- >>> torch.diagflat(a, 1)
- tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
- [ 0.0000, 0.0000, -0.9068, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 0.1695],
- [ 0.0000, 0.0000, 0.0000, 0.0000]])
-
- >>> a = torch.randn(2, 2)
- >>> a
- tensor([[ 0.2094, -0.3018],
- [-0.1516, 1.9342]])
- >>> torch.diagflat(a)
- tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
- [ 0.0000, -0.3018, 0.0000, 0.0000],
- [ 0.0000, 0.0000, -0.1516, 0.0000],
- [ 0.0000, 0.0000, 0.0000, 1.9342]])
- """
- ...
- @overload
- def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
- r"""
- diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
-
- Returns a partial view of :attr:`input` with its diagonal elements
- with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
- at the end of the shape.
-
- The argument :attr:`offset` controls which diagonal to consider:
-
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
-
- Applying :meth:`torch.diag_embed` to the output of this function with
- the same arguments yields a diagonal matrix with the diagonal entries
- of the input. However, :meth:`torch.diag_embed` has different default
- dimensions, so those need to be explicitly specified.
-
- Args:
- input (Tensor): the input tensor. Must be at least 2-dimensional.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: 0.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: 1.
-
- .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
-
- Examples::
-
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-1.0854, 1.1431, -0.1752],
- [ 0.8536, -0.0905, 0.0360],
- [ 0.6927, -0.3735, -0.4945]])
-
-
- >>> torch.diagonal(a, 0)
- tensor([-1.0854, -0.0905, -0.4945])
-
-
- >>> torch.diagonal(a, 1)
- tensor([ 1.1431, 0.0360])
-
-
- >>> x = torch.randn(2, 5, 4, 2)
- >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
- tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
- [-1.1065, 1.0401, -0.2235, -0.7938]],
-
- [[-1.7325, -0.3081, 0.6166, 0.2335],
- [ 1.0500, 0.7336, -0.3836, -1.1015]]])
- """
- ...
- @overload
- def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor:
- r"""
- diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
-
- Returns a partial view of :attr:`input` with its diagonal elements
- with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
- at the end of the shape.
-
- The argument :attr:`offset` controls which diagonal to consider:
-
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
-
- Applying :meth:`torch.diag_embed` to the output of this function with
- the same arguments yields a diagonal matrix with the diagonal entries
- of the input. However, :meth:`torch.diag_embed` has different default
- dimensions, so those need to be explicitly specified.
-
- Args:
- input (Tensor): the input tensor. Must be at least 2-dimensional.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: 0.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: 1.
-
- .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
-
- Examples::
-
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-1.0854, 1.1431, -0.1752],
- [ 0.8536, -0.0905, 0.0360],
- [ 0.6927, -0.3735, -0.4945]])
-
-
- >>> torch.diagonal(a, 0)
- tensor([-1.0854, -0.0905, -0.4945])
-
-
- >>> torch.diagonal(a, 1)
- tensor([ 1.1431, 0.0360])
-
-
- >>> x = torch.randn(2, 5, 4, 2)
- >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
- tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
- [-1.1065, 1.0401, -0.2235, -0.7938]],
-
- [[-1.7325, -0.3081, 0.6166, 0.2335],
- [ 1.0500, 0.7336, -0.3836, -1.1015]]])
- """
- ...
- def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.diagonal`, but all output tensors
- are freshly created instead of aliasing the input.
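-
- Example (a minimal sketch illustrating the non-aliasing behavior)::
-
- >>> a = torch.zeros(2, 2)
- >>> d = torch.diagonal_copy(a)
- >>> d += 1.0  # modifying the copy leaves the source untouched
- >>> a
- tensor([[0., 0.],
- [0., 0.]])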
- """
- ...
- def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor:
- r"""
- diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
-
- Embeds the values of the :attr:`src` tensor into :attr:`input` along
- the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
- and :attr:`dim2`.
-
- This function returns a tensor with fresh storage; it does not
- return a view.
-
- The argument :attr:`offset` controls which diagonal to consider:
-
- - If :attr:`offset` = 0, it is the main diagonal.
- - If :attr:`offset` > 0, it is above the main diagonal.
- - If :attr:`offset` < 0, it is below the main diagonal.
-
- Args:
- input (Tensor): the input tensor. Must be at least 2-dimensional.
- src (Tensor): the tensor to embed into :attr:`input`.
- offset (int, optional): which diagonal to consider. Default: 0
- (main diagonal).
- dim1 (int, optional): first dimension with respect to which to
- take diagonal. Default: 0.
- dim2 (int, optional): second dimension with respect to which to
- take diagonal. Default: 1.
-
- .. note::
-
- :attr:`src` must be of the proper size in order to be embedded
- into :attr:`input`. Specifically, it should have the same shape as
- ``torch.diagonal(input, offset, dim1, dim2)``
-
- Examples::
-
- >>> a = torch.zeros(3, 3)
- >>> a
- tensor([[0., 0., 0.],
- [0., 0., 0.],
- [0., 0., 0.]])
-
- >>> torch.diagonal_scatter(a, torch.ones(3), 0)
- tensor([[1., 0., 0.],
- [0., 1., 0.],
- [0., 0., 1.]])
-
- >>> torch.diagonal_scatter(a, torch.ones(2), 1)
- tensor([[0., 1., 0.],
- [0., 0., 1.],
- [0., 0., 0.]])
- """
- ...
- def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
-
- Computes the n-th forward difference along the given dimension.
-
- The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
- differences are calculated by using :func:`torch.diff` recursively.
-
- Args:
- input (Tensor): the tensor to compute the differences on
- n (int, optional): the number of times to recursively compute the difference
- dim (int, optional): the dimension to compute the difference along.
- Default is the last dimension.
- prepend, append (Tensor, optional): values to prepend or append to
- :attr:`input` along :attr:`dim` before computing the difference.
- They must have the same number of dimensions as :attr:`input`, and their
- shapes must match :attr:`input`'s shape except along :attr:`dim`.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([1, 3, 2])
- >>> torch.diff(a)
- tensor([ 2, -1])
- >>> b = torch.tensor([4, 5])
- >>> torch.diff(a, append=b)
- tensor([ 2, -1, 2, 1])
- >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
- >>> torch.diff(c, dim=0)
- tensor([[2, 2, 2]])
- >>> torch.diff(c, dim=1)
- tensor([[1, 1],
- [1, 1]])
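-
- >>> # Illustrative (not from the upstream docs): n=2 applies the difference twice
- >>> torch.diff(a, n=2)
- tensor([-3])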
- """
- ...
- def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- digamma(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.digamma`.
- """
- ...
- def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor:
- r"""
- dist(input, other, p=2) -> Tensor
-
- Returns the p-norm of (:attr:`input` - :attr:`other`)
-
- The shapes of :attr:`input` and :attr:`other` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the right-hand-side input tensor
- p (float, optional): the norm to be computed
-
- Example::
-
- >>> x = torch.randn(4)
- >>> x
- tensor([-1.5393, -0.8675, 0.5916, 1.6321])
- >>> y = torch.randn(4)
- >>> y
- tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
- >>> torch.dist(x, y, 3.5)
- tensor(1.6727)
- >>> torch.dist(x, y, 3)
- tensor(1.6973)
- >>> torch.dist(x, y, 0)
- tensor(4.)
- >>> torch.dist(x, y, 1)
- tensor(2.6537)
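-
- >>> # Illustrative (not from the upstream docs): shapes need only be broadcastable
- >>> torch.dist(torch.ones(3), torch.zeros(1), 1)
- tensor(3.)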
- """
- ...
- def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- div(input, other, *, rounding_mode=None, out=None) -> Tensor
-
- Divides each element of :attr:`input` by the corresponding element of
- :attr:`other`.
-
- .. math::
- \text{out}_i = \frac{\text{input}_i}{\text{other}_i}
-
- .. note::
- By default, this performs a "true" division like Python 3.
- See the :attr:`rounding_mode` argument for floor division.
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
- Always promotes integer types to the default scalar type.
-
- Args:
- input (Tensor): the dividend
- other (Tensor or Number): the divisor
-
- Keyword args:
- rounding_mode (str, optional): Type of rounding applied to the result:
-
- * None - default behavior. Performs no rounding and, if both :attr:`input` and
- :attr:`other` are integer types, promotes the inputs to the default scalar type.
- Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
- * ``"trunc"`` - rounds the results of the division towards zero.
- Equivalent to C-style integer division.
- * ``"floor"`` - rounds the results of the division down.
- Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
-
- out (Tensor, optional): the output tensor.
-
- Examples::
-
- >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
- >>> torch.div(x, 0.5)
- tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
-
- >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
- ... [ 0.1815, -1.0111, 0.9805, -1.5923],
- ... [ 0.1062, 1.4581, 0.7759, -1.2344],
- ... [-0.1830, -0.0313, 1.1908, -1.4757]])
- >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
- >>> torch.div(a, b)
- tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
- [ 0.2260, -3.4509, -1.2086, 6.8990],
- [ 0.1322, 4.9764, -0.9564, 5.3484],
- [-0.2278, -0.1068, -1.4678, 6.3938]])
-
- >>> torch.div(a, b, rounding_mode='trunc')
- tensor([[-0., -6., 0., 1.],
- [ 0., -3., -1., 6.],
- [ 0., 4., -0., 5.],
- [-0., -0., -1., 6.]])
-
- >>> torch.div(a, b, rounding_mode='floor')
- tensor([[-1., -7., 0., 1.],
- [ 0., -4., -2., 6.],
- [ 0., 4., -1., 5.],
- [-1., -1., -2., 6.]])
- """
- ...
- @overload
- def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- divide(input, other, *, rounding_mode=None, out=None) -> Tensor
-
- Alias for :func:`torch.div`.
- """
- ...
- @overload
- def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor:
- r"""
- divide(input, other, *, rounding_mode=None, out=None) -> Tensor
-
- Alias for :func:`torch.div`.
- """
- ...
- @overload
- def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor:
- r"""
- divide(input, other, *, rounding_mode=None, out=None) -> Tensor
-
- Alias for :func:`torch.div`.
- """
- ...
- @overload
- def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor:
- r"""
- divide(input, other, *, rounding_mode=None, out=None) -> Tensor
-
- Alias for :func:`torch.div`.
- """
- ...
- def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- dot(input, tensor, *, out=None) -> Tensor
-
- Computes the dot product of two 1D tensors.
-
- .. note::
-
- Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
- of two 1D tensors with the same number of elements.
-
- Args:
- input (Tensor): first tensor in the dot product, must be 1D.
- tensor (Tensor): second tensor in the dot product, must be 1D.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
- tensor(7)
-
- >>> t1, t2 = torch.tensor([0, 1]), torch.tensor([2, 3])
- >>> torch.dot(t1, t2)
- tensor(3)
- """
- ...
- def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def dsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
- r"""
- dsplit(input, indices_or_sections) -> List of Tensors
-
- Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
- depthwise according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
-
- This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
- (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
- it must evenly divide the split dimension or a runtime error will be thrown.
-
- This function is based on NumPy's :func:`numpy.dsplit`.
-
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
-
- Example::
-
- >>> t = torch.arange(16.0).reshape(2, 2, 4)
- >>> t
- tensor([[[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.]],
- [[ 8., 9., 10., 11.],
- [12., 13., 14., 15.]]])
- >>> torch.dsplit(t, 2)
- (tensor([[[ 0., 1.],
- [ 4., 5.]],
- [[ 8., 9.],
- [12., 13.]]]),
- tensor([[[ 2., 3.],
- [ 6., 7.]],
- [[10., 11.],
- [14., 15.]]]))
-
- >>> torch.dsplit(t, [3, 6])
- (tensor([[[ 0., 1., 2.],
- [ 4., 5., 6.]],
- [[ 8., 9., 10.],
- [12., 13., 14.]]]),
- tensor([[[ 3.],
- [ 7.]],
- [[11.],
- [15.]]]),
- tensor([], size=(2, 2, 0)))
- """
- ...
- @overload
- def dsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
- r"""
- dsplit(input, indices_or_sections) -> List of Tensors
-
- Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
- depthwise according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
-
- This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2)
- (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
- it must evenly divide the split dimension or a runtime error will be thrown.
-
- This function is based on NumPy's :func:`numpy.dsplit`.
-
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
-
- Example::
-
- >>> t = torch.arange(16.0).reshape(2, 2, 4)
- >>> t
- tensor([[[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.]],
- [[ 8., 9., 10., 11.],
- [12., 13., 14., 15.]]])
- >>> torch.dsplit(t, 2)
- (tensor([[[ 0., 1.],
- [ 4., 5.]],
- [[ 8., 9.],
- [12., 13.]]]),
- tensor([[[ 2., 3.],
- [ 6., 7.]],
- [[10., 11.],
- [14., 15.]]]))
-
- >>> torch.dsplit(t, [3, 6])
- (tensor([[[ 0., 1., 2.],
- [ 4., 5., 6.]],
- [[ 8., 9., 10.],
- [12., 13., 14.]]]),
- tensor([[[ 3.],
- [ 7.]],
- [[11.],
- [15.]]]),
- tensor([], size=(2, 2, 0)))
- """
- ...
- def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- dstack(tensors, *, out=None) -> Tensor
-
- Stack tensors in sequence depthwise (along third axis).
-
- This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
-
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.dstack((a,b))
- tensor([[[1, 4],
- [2, 5],
- [3, 6]]])
- >>> a = torch.tensor([[1],[2],[3]])
- >>> b = torch.tensor([[4],[5],[6]])
- >>> torch.dstack((a,b))
- tensor([[[1, 4]],
- [[2, 5]],
- [[3, 6]]])
- """
- ...
- def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ...
- @overload
- def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- @overload
- def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ...
- @overload
- def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
-
- Returns a tensor filled with uninitialized data. The shape of the tensor is
- defined by the variable argument :attr:`size`.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.contiguous_format``.
-
- Example::
-
- >>> torch.empty((2,3), dtype=torch.int64)
- tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
- [ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
- """
- ...
- @overload
- def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
-
- Returns a tensor filled with uninitialized data. The shape of the tensor is
- defined by the variable argument :attr:`size`.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.contiguous_format``.
-
- Example::
-
- >>> torch.empty((2,3), dtype=torch.int64)
- tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
- [ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
- """
- ...
- @overload
- def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
-
- Returns a tensor filled with uninitialized data. The shape of the tensor is
- defined by the variable argument :attr:`size`.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.contiguous_format``.
-
- Example::
-
- >>> torch.empty((2,3), dtype=torch.int64)
- tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
- [ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
- """
- ...
- @overload
- def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor
-
- Returns a tensor filled with uninitialized data. The shape of the tensor is
- defined by the variable argument :attr:`size`.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.contiguous_format``.
-
- Example::
-
- >>> torch.empty((2,3), dtype=torch.int64)
- tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
- [ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
- """
- ...
- def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns an uninitialized tensor with the same size as :attr:`input`.
- ``torch.empty_like(input)`` is equivalent to
- ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- Example::
-
- >>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
- >>> torch.empty_like(a)
- tensor([[0, 0, 0],
- [0, 0, 0]], device='cuda:0', dtype=torch.int32)
- """
- ...
- def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Creates an uninitialized, non-overlapping and dense tensor with the
- specified :attr:`size`, with :attr:`physical_layout` specifying how the
- dimensions are physically laid out in memory (each logical dimension is listed
- from outermost to innermost). :attr:`physical_layout` is a generalization
- of NCHW/NHWC notation: if each dimension is assigned a number according to
- what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
- while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output
- tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
- (notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).
-
- Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
- tensor with no overlaps. If possible, prefer using this function over
- :func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- size (tuple of int): the shape of the output tensor
- physical_layout (tuple of int): the ordering of dimensions physically in memory
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Examples:
-
- >>> torch.empty((2, 3, 5, 7)).stride()
- (105, 35, 7, 1)
- >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
- (105, 35, 7, 1)
- >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
- (105, 1, 21, 3)
- >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
- (105, 1, 21, 3)
- >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order()
- (0, 2, 3, 1)
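-
- >>> # Illustrative (not from the upstream docs): unlike permute, the logical
- >>> # shape is unchanged; only the physical strides differ
- >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).shape
- torch.Size([2, 3, 5, 7])
- >>> torch.empty((2, 3, 5, 7)).permute(0, 2, 3, 1).shape
- torch.Size([2, 5, 7, 3])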
- """
- ...
- def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
-
- .. warning::
- If the constructed tensor is "overlapped" (with multiple indices referring to the same element
- in memory) its behavior is undefined.
-
- .. note::
- If :func:`torch.use_deterministic_algorithms()` and
- :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
- ``True``, the output tensor is initialized to prevent any possible
- nondeterministic behavior from using the data as an input to an operation.
- Floating point and complex tensors are filled with NaN, and integer tensors
- are filled with the maximum value.
-
- Args:
- size (tuple of int): the shape of the output tensor
- stride (tuple of int): the strides of the output tensor
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> a = torch.empty_strided((2, 3), (1, 2))
- >>> a
- tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
- [0.0000e+00, 0.0000e+00, 3.0705e-41]])
- >>> a.stride()
- (1, 2)
- >>> a.size()
- torch.Size([2, 3])
- """
- ...
- @overload
- def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- eq(input, other, *, out=None) -> Tensor
-
- Computes element-wise equality
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[ True, False],
- [False, True]])
- """
- ...
- @overload
- def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- eq(input, other, *, out=None) -> Tensor
-
- Computes element-wise equality
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[ True, False],
- [False, True]])
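-
- >>> # Illustrative (not from the upstream docs): comparing against a scalar
- >>> torch.eq(torch.tensor([1, 2, 3]), 2)
- tensor([False, True, False])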
- """
- ...
- def equal(input: Tensor, other: Tensor) -> _bool:
- r"""
- equal(input, other) -> bool
-
- ``True`` if two tensors have the same size and elements, ``False`` otherwise.
-
- Example::
-
- >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
- True
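-
- >>> # Illustrative (not from the upstream docs): differing sizes compare unequal
- >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2, 3]))
- False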
- """
- ...
- def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- erf(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.erf`.
- """
- ...
- def erf_(input: Tensor) -> Tensor: ...
- def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- erfc(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.erfc`.
- """
- ...
- def erfc_(input: Tensor) -> Tensor: ...
- def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- erfinv(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.erfinv`.
- """
- ...
- def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- exp(input, *, out=None) -> Tensor
-
- Returns a new tensor with the exponential of the elements
- of the input tensor :attr:`input`.
-
- .. math::
- y_{i} = e^{x_{i}}
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.exp(torch.tensor([0, math.log(2.)]))
- tensor([ 1., 2.])
- """
- ...
- def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- exp2(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.exp2`.
- """
- ...
- def exp2_(input: Tensor) -> Tensor: ...
- def exp_(input: Tensor) -> Tensor: ...
- def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.expand`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- expm1(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.expm1`.
- """
- ...
- def expm1_(input: Tensor) -> Tensor: ...
- @overload
- def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
-
- Args:
- n (int): the number of rows
- m (int, optional): the number of columns with default being :attr:`n`
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
-
- Example::
-
- >>> torch.eye(3)
- tensor([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
- """
- ...
- @overload
- def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
-
- Args:
- n (int): the number of rows
- m (int, optional): the number of columns with default being :attr:`n`
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
-
- Example::
-
- >>> torch.eye(3)
- tensor([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
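-
- >>> # Illustrative (not from the upstream docs): with m specified, the
- >>> # result is rectangular
- >>> torch.eye(2, 3)
- tensor([[ 1., 0., 0.],
- [ 0., 1., 0.]])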
- """
- ...
- def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor:
- r"""
- fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
-
- Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
- :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
-
- .. math::
- \text{output} = (
- min(
- \text{quant\_max},
- max(
- \text{quant\_min},
- \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
- )
- ) - \text{zero\_point}
- ) \times \text{scale}
-
- Args:
- input (Tensor): the input value(s), in ``torch.float32``
- scale (Tensor): quantization scale, per channel in ``torch.float32``
- zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
- axis (int32): channel axis
- quant_min (int64): lower bound of the quantized domain
- quant_max (int64): upper bound of the quantized domain
-
- Returns:
- Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
-
- Example::
-
- >>> x = torch.randn(2, 2, 2)
- >>> x
- tensor([[[-0.2525, -0.0466],
- [ 0.3491, -0.2168]],
-
- [[-0.5906, 1.6258],
- [ 0.6444, -0.0542]]])
- >>> scales = (torch.randn(2) + 1) * 0.05
- >>> scales
- tensor([0.0475, 0.0486])
- >>> zero_points = torch.zeros(2).to(torch.int32)
- >>> zero_points
- tensor([0, 0])
- >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
- tensor([[[0.0000, 0.0000],
- [0.3405, 0.0000]],
-
- [[0.0000, 1.6134],
- [0.6323, 0.0000]]])
- """
- ...
- @overload
- def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor:
- r"""
- fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
-
- Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
- :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
-
- .. math::
- \text{output} = (
- min(
- \text{quant\_max},
- max(
- \text{quant\_min},
- \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
- )
- ) - \text{zero\_point}
- ) \times \text{scale}
-
- Args:
- input (Tensor): the input value(s), ``torch.float32`` tensor
- scale (double scalar or ``float32`` Tensor): quantization scale
- zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
- quant_min (int64): lower bound of the quantized domain
- quant_max (int64): upper bound of the quantized domain
-
- Returns:
- Tensor: A newly fake_quantized ``torch.float32`` tensor
-
- Example::
-
- >>> x = torch.randn(4)
- >>> x
- tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
- >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
- tensor([0.1000, 1.0000, 0.4000, 0.0000])
- >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
- tensor([0.1000, 1.0000, 0.4000, 0.0000])
- """
- ...
- @overload
- def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor:
- r"""
- fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
-
- Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
- :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
-
- .. math::
- \text{output} = (
- min(
- \text{quant\_max},
- max(
- \text{quant\_min},
- \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
- )
- ) - \text{zero\_point}
- ) \times \text{scale}
-
- Args:
- input (Tensor): the input value(s), ``torch.float32`` tensor
- scale (double scalar or ``float32`` Tensor): quantization scale
- zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
- quant_min (int64): lower bound of the quantized domain
- quant_max (int64): upper bound of the quantized domain
-
- Returns:
- Tensor: A newly fake_quantized ``torch.float32`` tensor
-
- Example::
-
- >>> x = torch.randn(4)
- >>> x
- tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
- >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
- tensor([0.1000, 1.0000, 0.4000, 0.0000])
- >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
- tensor([0.1000, 1.0000, 0.4000, 0.0000])
- """
- ...
- def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
- def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ...
- def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
- def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ...
- def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ...
- def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ...
- @overload
- def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ...
- @overload
- def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ...
- def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ...
- @overload
- def fill(input: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
- @overload
- def fill_(input: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ...
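- # Hedged usage sketch for the undocumented fill/fill_ overloads above (doctest
- # shown as a comment): fill_ writes `value` into every element of `input` in
- # place and returns it; fill returns a new filled tensor instead.
- #     >>> t = torch.zeros(3)
- #     >>> torch.fill_(t, 5.0)
- #     tensor([5., 5., 5.])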
- def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- fix(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.trunc`
- """
- ...
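- # Since fix is an alias of trunc, it rounds toward zero; a minimal
- # doctest-style sketch (illustrative):
- #     >>> torch.fix(torch.tensor([3.7, -2.3]))
- #     tensor([ 3., -2.])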
- def fix_(input: Tensor) -> Tensor: ...
- @overload
- def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor:
- r"""
- flatten(input, start_dim=0, end_dim=-1) -> Tensor
-
- Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
- are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
- The order of elements in :attr:`input` is unchanged.
-
- Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
- or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
- be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
- flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
-
- .. note::
- Flattening a zero-dimensional tensor will return a one-dimensional view.
-
- Args:
- input (Tensor): the input tensor.
- start_dim (int): the first dim to flatten
- end_dim (int): the last dim to flatten
-
- Example::
-
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.flatten(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- >>> torch.flatten(t, start_dim=1)
- tensor([[1, 2, 3, 4],
- [5, 6, 7, 8]])
- """
- ...
- @overload
- def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- flatten(input, start_dim=0, end_dim=-1) -> Tensor
-
- Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
- are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
- The order of elements in :attr:`input` is unchanged.
-
- Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
- or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
- be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
- flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
-
- .. note::
- Flattening a zero-dimensional tensor will return a one-dimensional view.
-
- Args:
- input (Tensor): the input tensor.
- start_dim (int): the first dim to flatten
- end_dim (int): the last dim to flatten
-
- Example::
-
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.flatten(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- >>> torch.flatten(t, start_dim=1)
- tensor([[1, 2, 3, 4],
- [5, 6, 7, 8]])
- """
- ...
- @overload
- def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- flatten(input, start_dim=0, end_dim=-1) -> Tensor
-
- Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
- are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
- The order of elements in :attr:`input` is unchanged.
-
- Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
- or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
- be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
- flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
-
- .. note::
- Flattening a zero-dimensional tensor will return a one-dimensional view.
-
- Args:
- input (Tensor): the input tensor.
- start_dim (int): the first dim to flatten
- end_dim (int): the last dim to flatten
-
- Example::
-
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.flatten(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- >>> torch.flatten(t, start_dim=1)
- tensor([[1, 2, 3, 4],
- [5, 6, 7, 8]])
- """
- ...
- @overload
- def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- flatten(input, start_dim=0, end_dim=-1) -> Tensor
-
- Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
- are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
- The order of elements in :attr:`input` is unchanged.
-
- Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
- or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
- be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
- flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
-
- .. note::
- Flattening a zero-dimensional tensor will return a one-dimensional view.
-
- Args:
- input (Tensor): the input tensor.
- start_dim (int): the first dim to flatten
- end_dim (int): the last dim to flatten
-
- Example::
-
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.flatten(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- >>> torch.flatten(t, start_dim=1)
- tensor([[1, 2, 3, 4],
- [5, 6, 7, 8]])
- """
- ...
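- # The view-vs-copy behavior documented above can be observed by comparing data
- # pointers (illustrative sketch; assumes a contiguous input, for which flatten
- # can return a view of the same storage):
- #     >>> t = torch.arange(8).reshape(2, 2, 2)
- #     >>> torch.flatten(t).data_ptr() == t.data_ptr()
- #     True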
- def flip(input: Tensor, dims: _size) -> Tensor:
- r"""
- flip(input, dims) -> Tensor
-
- Reverse the order of an n-D tensor along the given axes in dims.
-
- .. note::
- `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
- which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
- `torch.flip` is expected to be slower than `np.flip`.
-
- Args:
- input (Tensor): the input tensor.
- dims (a list or tuple): axes to flip on
-
- Example::
-
- >>> x = torch.arange(8).view(2, 2, 2)
- >>> x
- tensor([[[ 0, 1],
- [ 2, 3]],
-
- [[ 4, 5],
- [ 6, 7]]])
- >>> torch.flip(x, [0, 1])
- tensor([[[ 6, 7],
- [ 4, 5]],
-
- [[ 2, 3],
- [ 0, 1]]])
- """
- ...
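- # Because flip copies (unlike np.flip, per the note above), writing to the
- # result does not alias the input; a minimal sketch:
- #     >>> x = torch.arange(4)
- #     >>> y = torch.flip(x, [0])
- #     >>> y[0] = 100
- #     >>> x  # unchanged, since flip returned a copy
- #     tensor([0, 1, 2, 3])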
- def fliplr(input: Tensor) -> Tensor:
- r"""
- fliplr(input) -> Tensor
-
- Flip tensor in the left/right direction, returning a new tensor.
-
- Flip the entries in each row in the left/right direction.
- Columns are preserved, but appear in a different order than before.
-
- Note:
- Requires the tensor to be at least 2-D.
-
- .. note::
- `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
- which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
- `torch.fliplr` is expected to be slower than `np.fliplr`.
-
- Args:
- input (Tensor): Must be at least 2-dimensional.
-
- Example::
-
- >>> x = torch.arange(4).view(2, 2)
- >>> x
- tensor([[0, 1],
- [2, 3]])
- >>> torch.fliplr(x)
- tensor([[1, 0],
- [3, 2]])
- """
- ...
- def flipud(input: Tensor) -> Tensor:
- r"""
- flipud(input) -> Tensor
-
- Flip tensor in the up/down direction, returning a new tensor.
-
- Flip the entries in each column in the up/down direction.
- Rows are preserved, but appear in a different order than before.
-
- Note:
- Requires the tensor to be at least 1-D.
-
- .. note::
- `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
- which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
- `torch.flipud` is expected to be slower than `np.flipud`.
-
- Args:
- input (Tensor): Must be at least 1-dimensional.
-
- Example::
-
- >>> x = torch.arange(4).view(2, 2)
- >>> x
- tensor([[0, 1],
- [2, 3]])
- >>> torch.flipud(x)
- tensor([[2, 3],
- [0, 1]])
- """
- ...
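- # For suitable inputs, fliplr and flipud are equivalent to flip along dim 1
- # and dim 0 respectively (illustrative equivalence check):
- #     >>> x = torch.arange(4).view(2, 2)
- #     >>> torch.equal(torch.fliplr(x), torch.flip(x, [1]))
- #     True
- #     >>> torch.equal(torch.flipud(x), torch.flip(x, [0]))
- #     True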
- @overload
- def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- float_power(input, exponent, *, out=None) -> Tensor
-
- Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
- If neither input is complex, this returns a ``torch.float64`` tensor,
- and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
-
- .. note::
- This function always computes in double precision, unlike :func:`torch.pow`,
- which implements more typical :ref:`type promotion <type-promotion-doc>`.
- This is useful when the computation needs to be performed in a wider or more precise dtype,
- or the results of the computation may contain fractional values not representable in the input dtypes,
- like when an integer base is raised to a negative integer exponent.
-
- Args:
- input (Tensor or Number): the base value(s)
- exponent (Tensor or Number): the exponent value(s)
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randint(10, (4,))
- >>> a
- tensor([6, 4, 7, 1])
- >>> torch.float_power(a, 2)
- tensor([36., 16., 49., 1.], dtype=torch.float64)
-
- >>> a = torch.arange(1, 5)
- >>> a
- tensor([ 1, 2, 3, 4])
- >>> exp = torch.tensor([2, -3, 4, -5])
- >>> exp
- tensor([ 2, -3, 4, -5])
- >>> torch.float_power(a, exp)
- tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
- """
- ...
- @overload
- def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- float_power(input, exponent, *, out=None) -> Tensor
-
- Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
- If neither input is complex, this returns a ``torch.float64`` tensor,
- and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
-
- .. note::
- This function always computes in double precision, unlike :func:`torch.pow`,
- which implements more typical :ref:`type promotion <type-promotion-doc>`.
- This is useful when the computation needs to be performed in a wider or more precise dtype,
- or the results of the computation may contain fractional values not representable in the input dtypes,
- like when an integer base is raised to a negative integer exponent.
-
- Args:
- input (Tensor or Number): the base value(s)
- exponent (Tensor or Number): the exponent value(s)
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randint(10, (4,))
- >>> a
- tensor([6, 4, 7, 1])
- >>> torch.float_power(a, 2)
- tensor([36., 16., 49., 1.], dtype=torch.float64)
-
- >>> a = torch.arange(1, 5)
- >>> a
- tensor([ 1, 2, 3, 4])
- >>> exp = torch.tensor([2, -3, 4, -5])
- >>> exp
- tensor([ 2, -3, 4, -5])
- >>> torch.float_power(a, exp)
- tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
- """
- ...
- @overload
- def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- float_power(input, exponent, *, out=None) -> Tensor
-
- Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
- If neither input is complex, this returns a ``torch.float64`` tensor,
- and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
-
- .. note::
- This function always computes in double precision, unlike :func:`torch.pow`,
- which implements more typical :ref:`type promotion <type-promotion-doc>`.
- This is useful when the computation needs to be performed in a wider or more precise dtype,
- or the results of the computation may contain fractional values not representable in the input dtypes,
- like when an integer base is raised to a negative integer exponent.
-
- Args:
- input (Tensor or Number): the base value(s)
- exponent (Tensor or Number): the exponent value(s)
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randint(10, (4,))
- >>> a
- tensor([6, 4, 7, 1])
- >>> torch.float_power(a, 2)
- tensor([36., 16., 49., 1.], dtype=torch.float64)
-
- >>> a = torch.arange(1, 5)
- >>> a
- tensor([ 1, 2, 3, 4])
- >>> exp = torch.tensor([2, -3, 4, -5])
- >>> exp
- tensor([ 2, -3, 4, -5])
- >>> torch.float_power(a, exp)
- tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
- """
- ...
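- # The double-precision computation described above matters for integer bases
- # raised to negative integer exponents, a case torch.pow rejects for integer
- # tensors; a hedged sketch:
- #     >>> a = torch.tensor([2, 4])
- #     >>> torch.float_power(a, -1)
- #     tensor([0.5000, 0.2500], dtype=torch.float64)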
- def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- floor(input, *, out=None) -> Tensor
-
- Returns a new tensor with the floor of the elements of :attr:`input`,
- the largest integer less than or equal to each element.
-
- For integer inputs, follows the Array API convention of returning a
- copy of the input tensor.
-
- .. math::
- \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.8166, 1.5308, -0.2530, -0.2091])
- >>> torch.floor(a)
- tensor([-1., 1., -1., -1.])
- """
- ...
- def floor_(input: Tensor) -> Tensor: ...
- def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- floor_divide(input, other, *, out=None) -> Tensor
-
- .. note::
-
- Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
- truncation division. To restore the previous behavior use
- :func:`torch.div` with ``rounding_mode='trunc'``.
-
- Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
- the result.
-
- .. math::
- \text{out}_i = \text{floor} \left( \frac{\text{input}_i}{\text{other}_i} \right)
-
- Supports broadcasting to a common shape, type promotion, and integer and float inputs.
-
- Args:
- input (Tensor or Number): the dividend
- other (Tensor or Number): the divisor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([4.0, 3.0])
- >>> b = torch.tensor([2.0, 2.0])
- >>> torch.floor_divide(a, b)
- tensor([2.0, 1.0])
- >>> torch.floor_divide(a, 1.4)
- tensor([2.0, 2.0])
- """
- ...
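- # Floor rounding differs from the pre-1.13 truncation behavior mentioned in
- # the note above whenever the quotient is negative; a minimal comparison:
- #     >>> torch.floor_divide(torch.tensor([-3.0]), 2)
- #     tensor([-2.])
- #     >>> torch.div(torch.tensor([-3.0]), 2, rounding_mode='trunc')
- #     tensor([-1.])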
- def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- fmax(input, other, *, out=None) -> Tensor
-
- Computes the element-wise maximum of :attr:`input` and :attr:`other`.
-
- This is like :func:`torch.maximum` except it handles NaNs differently:
- if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
- Only if both elements are NaN is NaN propagated.
-
- This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
- >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
- >>> torch.fmax(a, b)
- tensor([9.7000, 0.5000, 3.1000, nan])
- """
- ...
- def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- fmin(input, other, *, out=None) -> Tensor
-
- Computes the element-wise minimum of :attr:`input` and :attr:`other`.
-
- This is like :func:`torch.minimum` except it handles NaNs differently:
- if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
- Only if both elements are NaN is NaN propagated.
-
- This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
- >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
- >>> torch.fmin(a, b)
- tensor([-9.3000, 0.1000, 2.1000, nan])
- """
- ...
- @overload
- def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- fmod(input, other, *, out=None) -> Tensor
-
- Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
- The result has the same sign as the dividend :attr:`input` and its absolute value
- is less than that of :attr:`other`.
-
- This function may be defined in terms of :func:`torch.div` as
-
- .. code:: python
-
- torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
-
- .. note::
-
- When the divisor is zero, returns ``NaN`` for floating point dtypes
- on both CPU and GPU; raises ``RuntimeError`` for integer division by
- zero on CPU; integer division by zero on GPU may return any value.
-
- .. note::
-
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
-
- .. seealso::
-
- :func:`torch.remainder`, which implements Python's modulus operator.
- That function is defined using division that rounds the result down (floor division).
-
- Args:
- input (Tensor): the dividend
- other (Tensor or Scalar): the divisor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([-1., -0., -1., 1., 0., 1.])
- >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
- """
- ...
- @overload
- def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- fmod(input, other, *, out=None) -> Tensor
-
- Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
- The result has the same sign as the dividend :attr:`input` and its absolute value
- is less than that of :attr:`other`.
-
- This function may be defined in terms of :func:`torch.div` as
-
- .. code:: python
-
- torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
-
- .. note::
-
- When the divisor is zero, returns ``NaN`` for floating point dtypes
- on both CPU and GPU; raises ``RuntimeError`` for integer division by
- zero on CPU; integer division by zero on GPU may return any value.
-
- .. note::
-
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
-
- .. seealso::
-
- :func:`torch.remainder`, which implements Python's modulus operator.
- That function is defined using division that rounds the result down (floor division).
-
- Args:
- input (Tensor): the dividend
- other (Tensor or Scalar): the divisor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([-1., -0., -1., 1., 0., 1.])
- >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
- """
- ...
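- # fmod takes the sign of the dividend (truncated division), while
- # torch.remainder takes the sign of the divisor (floor division); a short
- # contrast sketch:
- #     >>> torch.fmod(torch.tensor([-3.0]), 2)
- #     tensor([-1.])
- #     >>> torch.remainder(torch.tensor([-3.0]), 2)
- #     tensor([1.])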
- def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- frac(input, *, out=None) -> Tensor
-
- Computes the fractional portion of each element in :attr:`input`.
-
- .. math::
- \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
-
- Example::
-
- >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
- tensor([ 0.0000, 0.5000, -0.2000])
- """
- ...
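- # The frac formula above is equivalent to subtracting the truncated integer
- # part: for -3.2, trunc(-3.2) = -3, so frac(-3.2) = -3.2 - (-3) = -0.2,
- # matching the example output. (Illustrative restatement, not a separate API.)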
- def frac_(input: Tensor) -> Tensor: ...
- def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp:
- r"""
- frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
-
- Decomposes :attr:`input` into mantissa and exponent tensors
- such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
-
- The range of mantissa is the open interval (-1, 1).
-
- Supports float inputs.
-
- Args:
- input (Tensor): the input tensor
-
-
- Keyword args:
- out (tuple, optional): the output tensors
-
- Example::
-
- >>> x = torch.arange(9.)
- >>> mantissa, exponent = torch.frexp(x)
- >>> mantissa
- tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
- >>> exponent
- tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
- >>> torch.ldexp(mantissa, exponent)
- tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
- """
- ...
- def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
- def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False)
-
- Creates a CPU tensor with a storage backed by a memory-mapped file.
-
- If ``shared`` is ``True``, then memory is shared between processes. All changes are written to the file.
- If ``shared`` is ``False``, then changes to the tensor do not affect the file.
-
- ``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain
- at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed.
-
- .. note::
- Only CPU tensors can be mapped to files.
-
- .. note::
- For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory.
-
-
- Args:
- filename (str): file name to map
- shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
- underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
- size (int): number of elements in the tensor
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- pin_memory (bool, optional): If set, returned tensor would be allocated in
- the pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> t = torch.randn(2, 5, dtype=torch.float64)
- >>> t.numpy().tofile('storage.pt')
- >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64)
- """
- ...
- def from_numpy(ndarray) -> Tensor:
- r"""
- from_numpy(ndarray) -> Tensor
-
- Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
-
- The returned tensor and :attr:`ndarray` share the same memory. Modifications to
- the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
- tensor is not resizable.
-
- It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
- ``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
- ``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
- and ``bool``.
-
- .. warning::
- Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
-
- Example::
-
- >>> a = numpy.array([1, 2, 3])
- >>> t = torch.from_numpy(a)
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([-1, 2, 3])
- """
- ...
- def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, requires_grad: _bool = False) -> Tensor:
- r"""
- frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
-
- Creates a 1-dimensional :class:`Tensor` from an object that implements
- the Python buffer protocol.
-
- Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
- the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
- elements.
-
- Note that either of the following must be true:
-
- 1. :attr:`count` is a positive non-zero number, and the total number of bytes
- in the buffer is at least :attr:`offset` plus :attr:`count` times the size
- (in bytes) of :attr:`dtype`.
-
- 2. :attr:`count` is negative, and the length (number of bytes) of the buffer
- minus the :attr:`offset` is a multiple of the size (in bytes) of
- :attr:`dtype`.
-
- The returned tensor and buffer share the same memory. Modifications to
- the tensor will be reflected in the buffer and vice versa. The returned
- tensor is not resizable.
-
- .. note::
- This function increments the reference count for the object that
- owns the shared memory. Therefore, such memory will not be deallocated
- before the returned tensor goes out of scope.
-
- .. warning::
- This function's behavior is undefined when passed an object implementing
- the buffer protocol whose data is not on the CPU. Doing so is likely to
- cause a segmentation fault.
-
- .. warning::
- This function does not try to infer the :attr:`dtype` (hence, it is not
- optional). Passing a different :attr:`dtype` than its source may result
- in unexpected behavior.
-
- Args:
- buffer (object): a Python object that exposes the buffer interface.
-
- Keyword args:
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- count (int, optional): the number of desired elements to be read.
- If negative, all the elements (until the end of the buffer) will be
- read. Default: -1.
- offset (int, optional): the number of bytes to skip at the start of
- the buffer. Default: 0.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> import array
- >>> a = array.array('i', [1, 2, 3])
- >>> t = torch.frombuffer(a, dtype=torch.int32)
- >>> t
- tensor([ 1, 2, 3])
- >>> t[0] = -1
- >>> a
- array([-1, 2, 3])
-
- >>> # Interprets the signed char bytes as 32-bit integers.
- >>> # Each 4 signed char elements will be interpreted as
- >>> # 1 signed 32-bit integer.
- >>> import array
- >>> a = array.array('b', [-1, 0, 0, 0])
- >>> torch.frombuffer(a, dtype=torch.int32)
- tensor([255], dtype=torch.int32)
- """
- ...
- @overload
- def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
- tensor's dtype is inferred from :attr:`fill_value`.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
- fill_value (Scalar): the value to fill the output tensor with.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.full((2, 3), 3.141592)
- tensor([[ 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416]])
- """
- ...
- @overload
- def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
- tensor's dtype is inferred from :attr:`fill_value`.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
- fill_value (Scalar): the value to fill the output tensor with.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.full((2, 3), 3.141592)
- tensor([[ 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416]])
- """
- ...
- @overload
- def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
- tensor's dtype is inferred from :attr:`fill_value`.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
- fill_value (Scalar): the value to fill the output tensor with.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.full((2, 3), 3.141592)
- tensor([[ 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416]])
- """
- ...
- @overload
- def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
- tensor's dtype is inferred from :attr:`fill_value`.
-
- Args:
- size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
- shape of the output tensor.
- fill_value (Scalar): the value to fill the output tensor with.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.full((2, 3), 3.141592)
- tensor([[ 3.1416, 3.1416, 3.1416],
- [ 3.1416, 3.1416, 3.1416]])
- """
- ...
- def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- full_like(input, fill_value, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
- ``torch.full_like(input, fill_value)`` is equivalent to
- ``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
- fill_value: the number to fill the output tensor with.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
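- # full_like carries no example above; a hedged sketch of the size- and
- # dtype-preserving behavior it documents:
- #     >>> x = torch.arange(6, dtype=torch.float64).reshape(2, 3)
- #     >>> torch.full_like(x, 7)
- #     tensor([[7., 7., 7.],
- #             [7., 7., 7.]], dtype=torch.float64)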
- def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ...
- @overload
- def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
-
- Gathers values along an axis specified by `dim`.
-
- For a 3-D tensor the output is specified by::
-
- out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
- out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
- out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
-
- :attr:`input` and :attr:`index` must have the same number of dimensions.
- It is also required that ``index.size(d) <= input.size(d)`` for all
- dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
- Note that ``input`` and ``index`` do not broadcast against each other.
-
- Args:
- input (Tensor): the source tensor
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to gather
-
- Keyword arguments:
- sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
- out (Tensor, optional): the destination tensor
-
- Example::
-
- >>> t = torch.tensor([[1, 2], [3, 4]])
- >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
- tensor([[ 1, 1],
- [ 4, 3]])
- """
- ...
- @overload
- def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
-
- Gathers values along an axis specified by `dim`.
-
- For a 3-D tensor the output is specified by::
-
- out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
- out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
- out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
-
- :attr:`input` and :attr:`index` must have the same number of dimensions.
- It is also required that ``index.size(d) <= input.size(d)`` for all
- dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
- Note that ``input`` and ``index`` do not broadcast against each other.
-
- Args:
- input (Tensor): the source tensor
- dim (int): the axis along which to index
- index (LongTensor): the indices of elements to gather
-
- Keyword arguments:
- sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
- out (Tensor, optional): the destination tensor
-
- Example::
-
- >>> t = torch.tensor([[1, 2], [3, 4]])
- >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
- tensor([[ 1, 1],
- [ 4, 3]])
- """
- ...
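- # Worked trace of the gather example above with dim == 1, where
- # out[i][j] = input[i][index[i][j]]:
- #     out[0][0] = input[0][index[0][0]] = input[0][0] = 1
- #     out[0][1] = input[0][index[0][1]] = input[0][0] = 1
- #     out[1][0] = input[1][index[1][0]] = input[1][1] = 4
- #     out[1][1] = input[1][index[1][1]] = input[1][0] = 3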
- def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- gcd(input, other, *, out=None) -> Tensor
-
- Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
-
- Both :attr:`input` and :attr:`other` must have integer types.
-
- .. note::
- This defines :math:`gcd(0, 0) = 0`.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([5, 10, 15])
- >>> b = torch.tensor([3, 4, 5])
- >>> torch.gcd(a, b)
- tensor([1, 2, 5])
- >>> c = torch.tensor([3])
- >>> torch.gcd(a, c)
- tensor([1, 1, 3])
- """
- ...
- def gcd_(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ge(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} \geq \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[True, True], [False, True]])
- """
- ...
- @overload
- def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ge(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} \geq \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[True, True], [False, True]])
- """
- ...
- def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf:
- r"""
- geqrf(input, *, out=None) -> (Tensor, Tensor)
-
- This is a low-level function for calling LAPACK's geqrf directly. This function
- returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .
-
- Computes a QR decomposition of :attr:`input`.
- Both `Q` and `R` matrices are stored in the same output tensor `a`.
- The elements of `R` are stored on and above the diagonal.
- Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
- are stored below the diagonal.
- The results of this function can be used together with :func:`torch.linalg.householder_product`
- to obtain the `Q` matrix or
- with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
- for an efficient matrix-matrix multiplication.
-
- See `LAPACK documentation for geqrf`_ for further details.
-
- .. note::
- See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
- with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
-
- Args:
- input (Tensor): the input matrix
-
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
-
- .. _LAPACK documentation for geqrf:
- http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
- """
- ...
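- # Recovering Q and R from geqrf's packed result via
- # torch.linalg.householder_product, as the docstring suggests (illustrative;
- # equality holds up to floating-point tolerance):
- #     >>> A = torch.randn(3, 3, dtype=torch.float64)
- #     >>> a, tau = torch.geqrf(A)
- #     >>> Q = torch.linalg.householder_product(a, tau)
- #     >>> torch.allclose(Q @ a.triu(), A)
- #     True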
- def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ger(input, vec2, *, out=None) -> Tensor
-
- Alias of :func:`torch.outer`.
-
- .. warning::
- This function is deprecated and will be removed in a future PyTorch release.
- Use :func:`torch.outer` instead.
- """
- ...
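- # Since ger is deprecated, new code should call torch.outer directly; a
- # minimal sketch of the outer product it computes:
- #     >>> torch.outer(torch.tensor([1., 2.]), torch.tensor([3., 4.]))
- #     tensor([[3., 4.],
- #             [6., 8.]])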
- def get_default_dtype() -> _dtype:
- r"""
- get_default_dtype() -> torch.dtype
-
- Get the current default floating point :class:`torch.dtype`.
-
- Example::
-
- >>> torch.get_default_dtype() # initial default for floating point is torch.float32
- torch.float32
- >>> torch.set_default_dtype(torch.float64)
- >>> torch.get_default_dtype() # default is now changed to torch.float64
- torch.float64
- """
- ...
- def get_num_interop_threads() -> _int:
- r"""
- get_num_interop_threads() -> int
-
- Returns the number of threads used for inter-op parallelism on CPU
- (e.g. in JIT interpreter)
- """
- ...
- def get_num_threads() -> _int:
- r"""
- get_num_threads() -> int
-
- Returns the number of threads used for parallelizing CPU operations
- """
- ...
- @overload
- def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3])
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
- >>> # partial derivative for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # doubling the spacing between samples halves the estimated partial gradients.
-
- >>>
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """
- ...
- @overload
- def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3])
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
- >>> # partial derivative for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # doubling the spacing between samples halves the estimated partial gradients.
-
- >>>
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
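- 
- >>> # Editor's addition, not in the upstream docstring: a hand-checked
- >>> # illustration of edge_order=2, which makes the boundary estimates
- >>> # second-order accurate and hence exact for the quadratic f(x)=x^2
- >>> # sampled at the start of these examples.
- >>> torch.gradient(values, spacing = coordinates, edge_order=2)
- (tensor([-4., -2., 2., 8.]),)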
- """
- ...
- @overload
- def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3]).
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the
- >>> # partial derivatives are estimated for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # Doubling the spacing between samples halves the estimated partial gradients.
- 
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """
- ...
- @overload
- def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3]).
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the
- >>> # partial derivatives are estimated for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # Doubling the spacing between samples halves the estimated partial gradients.
- 
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """
- ...
- @overload
- def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3]).
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the
- >>> # partial derivatives are estimated for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # Doubling the spacing between samples halves the estimated partial gradients.
- 
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """
- ...
- @overload
- def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3]).
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the
- >>> # partial derivatives are estimated for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # Doubling the spacing between samples halves the estimated partial gradients.
- 
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """
- ...
- @overload
- def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]:
- r"""
- gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
-
- Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
- one or more dimensions using the `second-order accurate central differences method
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
- either first or second order estimates at the boundaries.
-
- The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
- specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
- to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
- :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
- :math:`g(1, 2, 3)\ == input[1, 2, 3]`.
-
- When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
- This is detailed in the "Keyword Arguments" section below.
-
- The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
- accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
- improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
- is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
- Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
- it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
-
- .. math::
- \begin{aligned}
- f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
- f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
- \end{aligned}
-
- Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
-
- .. math::
- f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
- + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
-
- .. note::
- We estimate the gradient of functions in complex domain
- :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
-
- The value of each partial derivative at the boundary points is computed differently. See edge_order below.
-
- Args:
- input (``Tensor``): the tensor that represents the values of the function
-
- Keyword args:
- spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
- how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
- the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
- indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
- indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
- Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
- the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
- the coordinates are (t0[1], t1[2], t2[3]).
-
- dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
- the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
- the :attr:`spacing` argument must correspond with the specified dims.
-
- edge_order (``int``, optional): 1 or 2, for `first-order
- <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
- `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
- estimation of the boundary ("edge") values, respectively.
-
- Examples::
-
- >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
- >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
- >>> values = torch.tensor([4., 1., 1., 16.])
- >>> torch.gradient(values, spacing = coordinates)
- (tensor([-3., -2., 2., 5.]),)
-
- >>> # Estimates the gradient of the R^2 -> R function whose samples are
- >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
- >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and the
- >>> # partial derivatives are estimated for both dimensions.
- >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
- >>> torch.gradient(t)
- (tensor([[ 9., 18., 36., 72.],
- [ 9., 18., 36., 72.]]),
- tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]))
-
- >>> # A scalar value for spacing modifies the relationship between tensor indices
- >>> # and input coordinates by multiplying the indices to find the
- >>> # coordinates. For example, below the indices of the innermost
- >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
- >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
- [ 5.0000, 7.5000, 15.0000, 20.0000]]))
- >>> # Doubling the spacing between samples halves the estimated partial gradients.
- 
- >>> # Estimates only the partial derivative for dimension 1
- >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
- (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
- [10.0000, 15.0000, 30.0000, 40.0000]]),)
-
- >>> # When spacing is a list of scalars, the relationship between the tensor
- >>> # indices and input coordinates changes based on dimension.
- >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
- >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
- >>> # 0, 1 translate to coordinates of [0, 2].
- >>> torch.gradient(t, spacing = [3., 2.])
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
-
- >>> # The following example is a replication of the previous one with explicit
- >>> # coordinates.
- >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
- >>> torch.gradient(t, spacing = coords)
- (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
- [ 4.5000, 9.0000, 18.0000, 36.0000]]),
- tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
- [ 3.3333, 5.0000, 10.0000, 13.3333]]))
- """
- ...
- @overload
- def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- greater(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.gt`.
- """
- ...
- @overload
- def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- greater(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.gt`.
- """
- ...
- @overload
- def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- greater_equal(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.ge`.
- """
- ...
- @overload
- def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- greater_equal(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.ge`.
- """
- ...
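- # Undocumented low-level ops backing torch.nn.functional.grid_sample; note that
- # interpolation_mode and padding_mode are integer enums here rather than the
- # string names accepted by the public functional API.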
- def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
- def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
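- # Functional form of group normalization (the op behind
- # torch.nn.functional.group_norm); num_groups must evenly divide the channel
- # dimension of `input`.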
- def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ...
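- # ATen entry points used by torch.nn.GRU and torch.nn.GRUCell; `params` is the
- # flattened list of weight (and, if has_biases, bias) tensors for all layers.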
- @overload
- def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- gt(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} > \text{other}` element-wise.
- 
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, True], [False, False]])
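- >>> # Editor's addition, not in the upstream docstring: `other` may also be
- >>> # a plain Python number, broadcast against `input`.
- >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), 2)
- tensor([[False, False], [True, True]])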
- """
- ...
- @overload
- def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- gt(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} > \text{other}` element-wise.
- 
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, True], [False, False]])
- """
- ...
- @overload
- def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Hamming window function.
-
- .. math::
- w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
- formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hamming_window(L, periodic=True)`` equal to
- ``torch.hamming_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- .. note::
- This is a generalized version of :meth:`torch.hann_window`.
-
- Arguments:
- window_length (int): the size of the returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- alpha (float, optional): The coefficient :math:`\alpha` in the equation above
- beta (float, optional): The coefficient :math:`\beta` in the equation above
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
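- 
- Example::
- 
- >>> # Editor's addition, not in the upstream docstring: a hand-checked
- >>> # demonstration of the periodic/symmetric identity stated above.
- >>> torch.allclose(torch.hamming_window(5, periodic=True),
- ...                torch.hamming_window(6, periodic=False)[:-1])
- True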
- """
- ...
- @overload
- def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Hamming window function.
-
- .. math::
- w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
- formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hamming_window(L, periodic=True)`` equal to
- ``torch.hamming_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- .. note::
- This is a generalized version of :meth:`torch.hann_window`.
-
- Arguments:
- window_length (int): the size of the returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- alpha (float, optional): The coefficient :math:`\alpha` in the equation above
- beta (float, optional): The coefficient :math:`\beta` in the equation above
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
- """
- ...
- @overload
- def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Hamming window function.
-
- .. math::
- w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
- formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hamming_window(L, periodic=True)`` equal to
- ``torch.hamming_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- .. note::
- This is a generalized version of :meth:`torch.hann_window`.
-
- Arguments:
- window_length (int): the size of the returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- alpha (float, optional): The coefficient :math:`\alpha` in the equation above
- beta (float, optional): The coefficient :math:`\beta` in the equation above
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
- """
- ...
- @overload
- def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Hamming window function.
-
- .. math::
- w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
- formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hamming_window(L, periodic=True)`` equal to
- ``torch.hamming_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- .. note::
- This is a generalized version of :meth:`torch.hann_window`.
-
- Arguments:
- window_length (int): the size of the returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
- alpha (float, optional): The coefficient :math:`\alpha` in the equation above
- beta (float, optional): The coefficient :math:`\beta` in the equation above
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
- """
- ...
- @overload
- def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Hann window function.
-
- .. math::
- w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
- \sin^2 \left( \frac{\pi n}{N - 1} \right),
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
- formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hann_window(L, periodic=True)`` equal to
- ``torch.hann_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- Arguments:
- window_length (int): the size of the returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
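- 
- Example::
- 
- >>> # Editor's addition, not in the upstream docstring: a symmetric Hann
- >>> # window of length 4, i.e. w[n] = sin^2(pi * n / 3) for n = 0..3.
- >>> torch.hann_window(4, periodic=False)
- tensor([0.0000, 0.7500, 0.7500, 0.0000])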
- """
- ...
- @overload
- def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Hann window function.
-
- .. math::
- w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
- \sin^2 \left( \frac{\pi n}{N - 1} \right),
-
- where :math:`N` is the full window size.
-
- The input :attr:`window_length` is a positive integer controlling the
- returned window size. The :attr:`periodic` flag determines whether the returned
- window trims off the last duplicate value from the symmetric window and is
- ready to be used as a periodic window with functions like
- :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in the
- formula above is in fact :math:`\text{window\_length} + 1`. Also, we always have
- ``torch.hann_window(L, periodic=True)`` equal to
- ``torch.hann_window(L + 1, periodic=False)[:-1]``.
-
- .. note::
- If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
-
- Arguments:
- window_length (int): the size of the returned window
- periodic (bool, optional): If True, returns a window to be used as a periodic
- function. If False, returns a symmetric window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported.
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Returns:
- Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window.
- """
- ...
- def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ...
- def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- heaviside(input, values, *, out=None) -> Tensor
-
- Computes the Heaviside step function for each element in :attr:`input`.
- The Heaviside step function is defined as:
-
- .. math::
- \text{heaviside}(input, values) = \begin{cases}
- 0, & \text{if input < 0}\\
- values, & \text{if input == 0}\\
- 1, & \text{if input > 0}
- \end{cases}
-
-
- Args:
- input (Tensor): the input tensor.
- values (Tensor): The values to use where :attr:`input` is zero.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> input = torch.tensor([-1.5, 0, 2.0])
- >>> values = torch.tensor([0.5])
- >>> torch.heaviside(input, values)
- tensor([0.0000, 0.5000, 1.0000])
- >>> values = torch.tensor([1.2, -2.0, 3.5])
- >>> torch.heaviside(input, values)
- tensor([0., -2., 1.])
- """
- ...
- def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ...
- def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
-
- Computes the histogram of a tensor.
-
- The elements are sorted into equal width bins between :attr:`min` and
- :attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
- maximum values of the data are used.
-
- Elements lower than :attr:`min`, elements higher than :attr:`max`, and ``NaN`` elements are ignored.
-
- Args:
- input (Tensor): the input tensor.
- bins (int): number of histogram bins
- min (Scalar): lower end of the range (inclusive)
- max (Scalar): upper end of the range (inclusive)
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- Tensor: Histogram represented as a tensor
-
- Example::
-
- >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
- tensor([ 0., 2., 1., 0.])
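- >>> # Editor's addition, not in the upstream docstring: with the default
- >>> # min=0, max=0 the range is taken from the data, here [1, 2].
- >>> torch.histc(torch.tensor([1., 2, 1]), bins=4)
- tensor([2., 0., 0., 1.])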
- """
- ...
- @overload
- def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram:
- r"""
- histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
-
- Computes a histogram of the values in a tensor.
-
- :attr:`bins` can be an integer or a 1D tensor.
-
- If :attr:`bins` is an int, it specifies the number of equal-width bins.
- By default, the lower and upper range of the bins is determined by the
- minimum and maximum elements of the input tensor. The :attr:`range`
- argument can be provided to specify a range for the bins.
-
- If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
- including the rightmost edge. It should contain at least 2 elements
- and its elements should be increasing.
-
- Args:
- input (Tensor): the input tensor.
- bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
- defines the sequence of bin edges including the rightmost edge.
-
- Keyword args:
- range (tuple of float): Defines the range of the bins.
- weight (Tensor): If provided, weight should have the same shape as input. Each value in
- input contributes its associated weight towards its bin's result.
- density (bool): If False, the result will contain the count (or total weight) in each bin.
- If True, the result is the value of the probability density function over the bins,
- normalized such that the integral over the range of the bins is 1.
- out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
-
- Returns:
- hist (Tensor): 1D Tensor containing the values of the histogram.
- bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
-
- Example::
-
- >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
- (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
- >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
- (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
- """
- ...
- @overload
- def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram:
- r"""
- histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
-
- Computes a histogram of the values in a tensor.
-
- :attr:`bins` can be an integer or a 1D tensor.
-
- If :attr:`bins` is an int, it specifies the number of equal-width bins.
- By default, the lower and upper range of the bins is determined by the
- minimum and maximum elements of the input tensor. The :attr:`range`
- argument can be provided to specify a range for the bins.
-
- If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
- including the rightmost edge. It should contain at least 2 elements
- and its elements should be increasing.
-
- Args:
- input (Tensor): the input tensor.
- bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
- defines the sequence of bin edges including the rightmost edge.
-
- Keyword args:
- range (tuple of float): Defines the range of the bins.
- weight (Tensor): If provided, weight should have the same shape as input. Each value in
- input contributes its associated weight towards its bin's result.
- density (bool): If False, the result will contain the count (or total weight) in each bin.
- If True, the result is the value of the probability density function over the bins,
- normalized such that the integral over the range of the bins is 1.
- out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
-
- Returns:
- hist (Tensor): 1D Tensor containing the values of the histogram.
- bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
-
- Example::
-
- >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
- (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
- >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
- (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
- """
- ...
- @overload
- def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
- r"""
- histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
-
- Computes a multi-dimensional histogram of the values in a tensor.
-
- Interprets the elements of an input tensor whose innermost dimension has size N
- as a collection of N-dimensional points. Maps each of the points into a set of
- N-dimensional bins and returns the number of points (or total weight) in each bin.
-
- :attr:`input` must be a tensor with at least 2 dimensions.
- If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
- If input has three or more dimensions, all but the last dimension are flattened.
-
- Each dimension is independently associated with its own strictly increasing sequence
- of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
- tensors. Alternatively, bin edges may be constructed automatically by passing a
- sequence of integers specifying the number of equal-width bins in each dimension.
-
- For each N-dimensional point in input:
- - Each of its coordinates is binned independently among the bin edges
- corresponding to its dimension
- - Binning results are combined to identify the N-dimensional bin (if any)
- into which the point falls
- - If the point falls into a bin, the bin's count (or total weight) is incremented
- - Points which do not fall into any bin do not contribute to the output
-
- :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
-
- If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
- of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
- least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
- the left and right edges of all bins. Every bin is inclusive of its left edge. Only
- the rightmost bin is also inclusive of its right edge.
-
- If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
- in each dimension. By default, the leftmost and rightmost bin edges in each dimension
- are determined by the minimum and maximum elements of the input tensor in the
- corresponding dimension. The :attr:`range` argument can be provided to manually
- specify the leftmost and rightmost bin edges in each dimension.
-
- If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
-
- .. note::
- See also :func:`torch.histogram`, which specifically computes 1D histograms.
- While :func:`torch.histogramdd` infers the dimensionality of its bins and
- binned values from the shape of :attr:`input`, :func:`torch.histogram`
- accepts and flattens :attr:`input` of any shape.
-
- Args:
- input (Tensor): the input tensor.
- bins: Tensor[], int[], or int.
- If Tensor[], defines the sequences of bin edges.
- If int[], defines the number of equal-width bins in each dimension.
- If int, defines the number of equal-width bins for all dimensions.
- Keyword args:
- range (sequence of float): Defines the leftmost and rightmost bin edges
- in each dimension.
- weight (Tensor): By default, each value in the input has weight 1. If a weight
- tensor is passed, each N-dimensional coordinate in input
- contributes its associated weight towards its bin's result.
- The weight tensor should have the same shape as the :attr:`input`
- tensor excluding its innermost dimension N.
- density (bool): If False (default), the result will contain the count (or total weight)
- in each bin. If True, each count (weight) is divided by the total count
- (total weight), then divided by the volume of its associated bin.
- Returns:
- hist (Tensor): N-dimensional Tensor containing the values of the histogram.
- bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
-
- Example::
-
- >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
- ... weight=torch.tensor([1., 2., 4., 8.]))
- torch.return_types.histogramdd(
- hist=tensor([[0., 1., 0.],
- [2., 0., 0.],
- [4., 0., 8.]]),
- bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
- tensor([0.0000, 0.6667, 1.3333, 2.0000])))
-
- >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
- ... range=[0., 1., 0., 1.], density=True)
- torch.return_types.histogramdd(
- hist=tensor([[2., 0.],
- [0., 2.]]),
- bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
- tensor([0.0000, 0.5000, 1.0000])))
- """
- ...
- @overload
- def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
- r"""
- histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
-
- Computes a multi-dimensional histogram of the values in a tensor.
-
- Interprets the elements of an input tensor whose innermost dimension has size N
- as a collection of N-dimensional points. Maps each of the points into a set of
- N-dimensional bins and returns the number of points (or total weight) in each bin.
-
- :attr:`input` must be a tensor with at least 2 dimensions.
- If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
- If input has three or more dimensions, all but the last dimension are flattened.
-
- Each dimension is independently associated with its own strictly increasing sequence
- of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
- tensors. Alternatively, bin edges may be constructed automatically by passing a
- sequence of integers specifying the number of equal-width bins in each dimension.
-
- For each N-dimensional point in input:
- - Each of its coordinates is binned independently among the bin edges
- corresponding to its dimension
- - Binning results are combined to identify the N-dimensional bin (if any)
- into which the point falls
- - If the point falls into a bin, the bin's count (or total weight) is incremented
- - Points which do not fall into any bin do not contribute to the output
-
- :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
-
- If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
- of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
- least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
- the left and right edges of all bins. Every bin is inclusive of its left edge. Only
- the rightmost bin is also inclusive of its right edge.
-
- If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
- in each dimension. By default, the leftmost and rightmost bin edges in each dimension
- are determined by the minimum and maximum elements of the input tensor in the
- corresponding dimension. The :attr:`range` argument can be provided to manually
- specify the leftmost and rightmost bin edges in each dimension.
-
- If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
-
- .. note::
- See also :func:`torch.histogram`, which specifically computes 1D histograms.
- While :func:`torch.histogramdd` infers the dimensionality of its bins and
- binned values from the shape of :attr:`input`, :func:`torch.histogram`
- accepts and flattens :attr:`input` of any shape.
-
- Args:
- input (Tensor): the input tensor.
- bins: Tensor[], int[], or int.
- If Tensor[], defines the sequences of bin edges.
- If int[], defines the number of equal-width bins in each dimension.
- If int, defines the number of equal-width bins for all dimensions.
- Keyword args:
- range (sequence of float): Defines the leftmost and rightmost bin edges
- in each dimension.
- weight (Tensor): By default, each value in the input has weight 1. If a weight
- tensor is passed, each N-dimensional coordinate in input
- contributes its associated weight towards its bin's result.
- The weight tensor should have the same shape as the :attr:`input`
- tensor excluding its innermost dimension N.
- density (bool): If False (default), the result will contain the count (or total weight)
- in each bin. If True, each count (weight) is divided by the total count
- (total weight), then divided by the volume of its associated bin.
- Returns:
- hist (Tensor): N-dimensional Tensor containing the values of the histogram.
- bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
-
- Example::
-
- >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
- ... weight=torch.tensor([1., 2., 4., 8.]))
- torch.return_types.histogramdd(
- hist=tensor([[0., 1., 0.],
- [2., 0., 0.],
- [4., 0., 8.]]),
- bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
- tensor([0.0000, 0.6667, 1.3333, 2.0000])))
-
- >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
- ... range=[0., 1., 0., 1.], density=True)
- torch.return_types.histogramdd(
- hist=tensor([[2., 0.],
- [0., 2.]]),
- bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
- tensor([0.0000, 0.5000, 1.0000])))
- """
- ...
- @overload
- def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd:
- r"""
- histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
-
- Computes a multi-dimensional histogram of the values in a tensor.
-
- Interprets the elements of an input tensor whose innermost dimension has size N
- as a collection of N-dimensional points. Maps each of the points into a set of
- N-dimensional bins and returns the number of points (or total weight) in each bin.
-
- :attr:`input` must be a tensor with at least 2 dimensions.
- If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
- If input has three or more dimensions, all but the last dimension are flattened.
-
- Each dimension is independently associated with its own strictly increasing sequence
- of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
- tensors. Alternatively, bin edges may be constructed automatically by passing a
- sequence of integers specifying the number of equal-width bins in each dimension.
-
- For each N-dimensional point in input:
- - Each of its coordinates is binned independently among the bin edges
- corresponding to its dimension
- - Binning results are combined to identify the N-dimensional bin (if any)
- into which the point falls
- - If the point falls into a bin, the bin's count (or total weight) is incremented
- - Points which do not fall into any bin do not contribute to the output
-
- :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
-
- If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
- of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
- least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
- the left and right edges of all bins. Every bin is inclusive of its left edge. Only
- the rightmost bin is also inclusive of its right edge.
-
- If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
- in each dimension. By default, the leftmost and rightmost bin edges in each dimension
- are determined by the minimum and maximum elements of the input tensor in the
- corresponding dimension. The :attr:`range` argument can be provided to manually
- specify the leftmost and rightmost bin edges in each dimension.
-
- If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
-
- .. note::
- See also :func:`torch.histogram`, which specifically computes 1D histograms.
- While :func:`torch.histogramdd` infers the dimensionality of its bins and
- binned values from the shape of :attr:`input`, :func:`torch.histogram`
- accepts and flattens :attr:`input` of any shape.
-
- Args:
- input (Tensor): the input tensor.
- bins: Tensor[], int[], or int.
- If Tensor[], defines the sequences of bin edges.
- If int[], defines the number of equal-width bins in each dimension.
- If int, defines the number of equal-width bins for all dimensions.
- Keyword args:
- range (sequence of float): Defines the leftmost and rightmost bin edges
- in each dimension.
- weight (Tensor): By default, each value in the input has weight 1. If a weight
- tensor is passed, each N-dimensional coordinate in input
- contributes its associated weight towards its bin's result.
- The weight tensor should have the same shape as the :attr:`input`
- tensor excluding its innermost dimension N.
- density (bool): If False (default), the result will contain the count (or total weight)
- in each bin. If True, each count (weight) is divided by the total count
- (total weight), then divided by the volume of its associated bin.
- Returns:
- hist (Tensor): N-dimensional Tensor containing the values of the histogram.
- bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
-
- Example::
-
- >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
- ... weight=torch.tensor([1., 2., 4., 8.]))
- torch.return_types.histogramdd(
- hist=tensor([[0., 1., 0.],
- [2., 0., 0.],
- [4., 0., 8.]]),
- bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
- tensor([0.0000, 0.6667, 1.3333, 2.0000])))
-
- >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
- ... range=[0., 1., 0., 1.], density=True)
- torch.return_types.histogramdd(
- hist=tensor([[2., 0.],
- [0., 2.]]),
- bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
- tensor([0.0000, 0.5000, 1.0000])))
- """
- ...
- def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ...
- @overload
- def hsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
- r"""
- hsplit(input, indices_or_sections) -> List of Tensors
-
- Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
- horizontally according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
-
- If :attr:`input` is one dimensional this is equivalent to calling
- torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
- zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
- torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
- except that if :attr:`indices_or_sections` is an integer it must evenly divide
- the split dimension or a runtime error will be thrown.
-
- This function is based on NumPy's :func:`numpy.hsplit`.
-
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
-
- Example::
-
- >>> t = torch.arange(16.0).reshape(4,4)
- >>> t
- tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [12., 13., 14., 15.]])
- >>> torch.hsplit(t, 2)
- (tensor([[ 0., 1.],
- [ 4., 5.],
- [ 8., 9.],
- [12., 13.]]),
- tensor([[ 2., 3.],
- [ 6., 7.],
- [10., 11.],
- [14., 15.]]))
- >>> torch.hsplit(t, [3, 6])
- (tensor([[ 0., 1., 2.],
- [ 4., 5., 6.],
- [ 8., 9., 10.],
- [12., 13., 14.]]),
- tensor([[ 3.],
- [ 7.],
- [11.],
- [15.]]),
- tensor([], size=(4, 0)))
- """
- ...
- @overload
- def hsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
- r"""
- hsplit(input, indices_or_sections) -> List of Tensors
-
- Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
- horizontally according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
-
- If :attr:`input` is one dimensional this is equivalent to calling
- torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is
- zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
- torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1),
- except that if :attr:`indices_or_sections` is an integer it must evenly divide
- the split dimension or a runtime error will be thrown.
-
- This function is based on NumPy's :func:`numpy.hsplit`.
-
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
-
- Example::
-
- >>> t = torch.arange(16.0).reshape(4,4)
- >>> t
- tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [12., 13., 14., 15.]])
- >>> torch.hsplit(t, 2)
- (tensor([[ 0., 1.],
- [ 4., 5.],
- [ 8., 9.],
- [12., 13.]]),
- tensor([[ 2., 3.],
- [ 6., 7.],
- [10., 11.],
- [14., 15.]]))
- >>> torch.hsplit(t, [3, 6])
- (tensor([[ 0., 1., 2.],
- [ 4., 5., 6.],
- [ 8., 9., 10.],
- [12., 13., 14.]]),
- tensor([[ 3.],
- [ 7.],
- [11.],
- [15.]]),
- tensor([], size=(4, 0)))
- """
- ...
- def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- hspmm(mat1, mat2, *, out=None) -> Tensor
-
- Performs a matrix multiplication of a :ref:`sparse COO matrix
- <sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
- result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
- <sparse-hybrid-coo-docs>`.
-
- Args:
- mat1 (Tensor): the first sparse matrix to be matrix multiplied
- mat2 (Tensor): the second strided matrix to be matrix multiplied
-
- Keyword args:
- out (Tensor, optional): the output tensor.
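-
- Example (an illustrative sketch; the hybrid result is densified for display)::
-
- >>> mat1 = torch.eye(3).to_sparse()
- >>> mat2 = torch.ones(3, 2)
- >>> torch.hspmm(mat1, mat2).to_dense()
- tensor([[1., 1.],
- [1., 1.],
- [1., 1.]])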
- """
- ...
- def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- hstack(tensors, *, out=None) -> Tensor
-
- Stack tensors in sequence horizontally (column wise).
-
- This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
-
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.hstack((a,b))
- tensor([1, 2, 3, 4, 5, 6])
- >>> a = torch.tensor([[1],[2],[3]])
- >>> b = torch.tensor([[4],[5],[6]])
- >>> torch.hstack((a,b))
- tensor([[1, 4],
- [2, 5],
- [3, 6]])
- """
- ...
- def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- hypot(input, other, *, out=None) -> Tensor
-
- Given the legs of a right triangle, return its hypotenuse.
-
- .. math::
- \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
-
- The shapes of ``input`` and ``other`` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the first input tensor
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
- tensor([5.0000, 5.6569, 6.4031])
- """
- ...
- def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- i0(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.i0`.
- """
- ...
- def i0_(input: Tensor) -> Tensor: ...
- def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- igamma(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.gammainc`.
- """
- ...
- def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- igammac(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.gammaincc`.
- """
- ...
- def imag(input: Tensor) -> Tensor:
- r"""
- imag(input) -> Tensor
-
- Returns a new tensor containing imaginary values of the :attr:`self` tensor.
- The returned tensor and :attr:`self` share the same underlying storage.
-
- .. warning::
- :func:`imag` is only supported for tensors with complex dtypes.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x=torch.randn(4, dtype=torch.cfloat)
- >>> x
- tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
- >>> x.imag
- tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
- """
- ...
- @overload
- def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
-
- See :meth:`~Tensor.index_add_` for function description.
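-
- Example (an illustrative sketch of the out-of-place form)::
-
- >>> x = torch.ones(3)
- >>> torch.index_add(x, 0, torch.tensor([0, 2]), torch.tensor([10., 20.]))
- tensor([11.,  1., 21.])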
- """
- ...
- @overload
- def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
-
- See :meth:`~Tensor.index_add_` for function description.
- """
- ...
- @overload
- def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- index_copy(input, dim, index, source, *, out=None) -> Tensor
-
- See :meth:`~Tensor.index_copy_` for function description.
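-
- Example (an illustrative sketch of the out-of-place form)::
-
- >>> x = torch.zeros(3, 2)
- >>> src = torch.tensor([[1., 2.], [3., 4.]])
- >>> torch.index_copy(x, 0, torch.tensor([0, 2]), src)
- tensor([[1., 2.],
- [0., 0.],
- [3., 4.]])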
- """
- ...
- @overload
- def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
- r"""
- index_copy(input, dim, index, source, *, out=None) -> Tensor
-
- See :meth:`~Tensor.index_copy_` for function description.
- """
- ...
- @overload
- def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
- @overload
- def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ...
- def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
- def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ...
- def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
- r"""
- index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor
-
- See :meth:`~Tensor.index_reduce_` for function description.
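-
- Example (an illustrative sketch using the ``'prod'`` reduction)::
-
- >>> x = torch.ones(3)
- >>> torch.index_reduce(x, 0, torch.tensor([0, 0]), torch.tensor([2., 3.]), 'prod')
- tensor([6., 1., 1.])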
- """
- ...
- @overload
- def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- index_select(input, dim, index, *, out=None) -> Tensor
-
- Returns a new tensor which indexes the :attr:`input` tensor along dimension
- :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
-
- The returned tensor has the same number of dimensions as the original tensor
- (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
- of :attr:`index`; other dimensions have the same size as in the original tensor.
-
- .. note:: The returned tensor does **not** use the same storage as the original
- tensor. If :attr:`out` has a different shape than expected, we
- silently change it to the correct shape, reallocating the underlying
- storage if necessary.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension in which we index
- index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> x
- tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
- [-0.4664, 0.2647, -0.1228, -1.1068],
- [-1.1734, -0.6571, 0.7230, -0.6004]])
- >>> indices = torch.tensor([0, 2])
- >>> torch.index_select(x, 0, indices)
- tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
- [-1.1734, -0.6571, 0.7230, -0.6004]])
- >>> torch.index_select(x, 1, indices)
- tensor([[ 0.1427, -0.5414],
- [-0.4664, -0.1228],
- [-1.1734, 0.7230]])
- """
- ...
- @overload
- def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- index_select(input, dim, index, *, out=None) -> Tensor
-
- Returns a new tensor which indexes the :attr:`input` tensor along dimension
- :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
-
- The returned tensor has the same number of dimensions as the original tensor
- (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
- of :attr:`index`; other dimensions have the same size as in the original tensor.
-
- .. note:: The returned tensor does **not** use the same storage as the original
- tensor. If :attr:`out` has a different shape than expected, we
- silently change it to the correct shape, reallocating the underlying
- storage if necessary.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension in which we index
- index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> x
- tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
- [-0.4664, 0.2647, -0.1228, -1.1068],
- [-1.1734, -0.6571, 0.7230, -0.6004]])
- >>> indices = torch.tensor([0, 2])
- >>> torch.index_select(x, 0, indices)
- tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
- [-1.1734, -0.6571, 0.7230, -0.6004]])
- >>> torch.index_select(x, 1, indices)
- tensor([[ 0.1427, -0.5414],
- [-0.4664, -0.1228],
- [-1.1734, 0.7230]])
- """
- ...
- def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.indices`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def init_num_threads() -> None: ...
- def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- inner(input, other, *, out=None) -> Tensor
-
- Computes the dot product for 1D tensors. For higher dimensions, sums the product
- of elements from :attr:`input` and :attr:`other` along their last dimension.
-
- .. note::
-
- If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
- to `torch.mul(input, other)`.
-
- If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
- dimension must match and the result is equivalent to `torch.tensordot(input,
- other, dims=([-1], [-1]))`
-
- Args:
- input (Tensor): First input tensor
- other (Tensor): Second input tensor
-
- Keyword args:
- out (Tensor, optional): Optional output tensor to write result into. The output
- shape is `input.shape[:-1] + other.shape[:-1]`.
-
- Example::
-
- # Dot product
- >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
- tensor(7)
-
- # Multidimensional input tensors
- >>> a = torch.randn(2, 3)
- >>> a
- tensor([[0.8173, 1.0874, 1.1784],
- [0.3279, 0.1234, 2.7894]])
- >>> b = torch.randn(2, 4, 3)
- >>> b
- tensor([[[-0.4682, -0.7159, 0.1506],
- [ 0.4034, -0.3657, 1.0387],
- [ 0.9892, -0.6684, 0.1774],
- [ 0.9482, 1.3261, 0.3917]],
-
- [[ 0.4537, 0.7493, 1.1724],
- [ 0.2291, 0.5749, -0.2267],
- [-0.7920, 0.3607, -0.3701],
- [ 1.3666, -0.5850, -1.7242]]])
- >>> torch.inner(a, b)
- tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
- [ 2.5671, 0.5452, -0.6912, -1.5509]],
-
- [[ 0.1782, 2.9843, 0.7366, 1.5672],
- [ 3.5115, -0.4864, -1.2476, -4.4337]]])
-
- # Scalar input
- >>> torch.inner(a, torch.tensor(2))
- tensor([[1.6347, 2.1748, 2.3567],
- [0.6558, 0.2469, 5.5787]])
- """
- ...
- def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ...
- def int_repr(input: Tensor) -> Tensor: ...
- def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- inverse(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.linalg.inv`
- """
- ...
- def is_complex(input: Tensor) -> _bool:
- r"""
- is_complex(input) -> (bool)
-
- Returns True if the data type of :attr:`input` is a complex data type i.e.,
- one of ``torch.complex64`` and ``torch.complex128``.
-
- Args:
- input (Tensor): the input tensor.
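-
- Example (illustrative)::
-
- >>> torch.is_complex(torch.tensor([1.+1j]))
- True
- >>> torch.is_complex(torch.tensor([1.0]))
- False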
- """
- ...
- def is_conj(input: Tensor) -> _bool:
- r"""
- is_conj(input) -> (bool)
-
- Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
-
- Args:
- input (Tensor): the input tensor.
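-
- Example (illustrative)::
-
- >>> x = torch.tensor([1.+1j])
- >>> torch.is_conj(x)
- False
- >>> torch.is_conj(x.conj())
- True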
- """
- ...
- def is_distributed(input: Tensor) -> _bool: ...
- def is_floating_point(input: Tensor) -> _bool:
- r"""
- is_floating_point(input) -> (bool)
-
- Returns True if the data type of :attr:`input` is a floating point data type i.e.,
- one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
-
- Args:
- input (Tensor): the input tensor.
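-
- Example (illustrative)::
-
- >>> torch.is_floating_point(torch.tensor([1.0]))
- True
- >>> torch.is_floating_point(torch.tensor([1]))
- False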
- """
- ...
- def is_grad_enabled() -> _bool:
- r"""
- is_grad_enabled() -> (bool)
-
- Returns True if grad mode is currently enabled.
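-
- Example (illustrative; assumes grad mode has not been disabled globally)::
-
- >>> torch.is_grad_enabled()
- True
- >>> with torch.no_grad():
- ...     torch.is_grad_enabled()
- False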
- """
- ...
- def is_inference(input: Tensor) -> _bool:
- r"""
- is_inference(input) -> (bool)
-
- Returns True if :attr:`input` is an inference tensor.
-
- A non-view tensor is an inference tensor if and only if it was
- allocated during inference mode. A view tensor is an inference
- tensor if and only if the tensor it is a view of is an inference tensor.
-
- For details on inference mode please see
- `Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
-
- Args:
- input (Tensor): the input tensor.
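-
- Example (illustrative)::
-
- >>> with torch.inference_mode():
- ...     x = torch.ones(2)
- >>> torch.is_inference(x)
- True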
- """
- ...
- def is_inference_mode_enabled() -> _bool:
- r"""
- is_inference_mode_enabled() -> (bool)
-
- Returns True if inference mode is currently enabled.
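-
- Example (illustrative; assumes inference mode is off at the top level)::
-
- >>> torch.is_inference_mode_enabled()
- False
- >>> with torch.inference_mode():
- ...     torch.is_inference_mode_enabled()
- True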
- """
- ...
- def is_neg(input: Tensor) -> _bool: ...
- def is_nonzero(input: Tensor) -> _bool:
- r"""
- is_nonzero(input) -> (bool)
-
- Returns True if the :attr:`input` is a single element tensor which is not equal to zero
- after type conversions.
- i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
- ``torch.tensor([False])``.
- Throws a ``RuntimeError`` if ``input.numel() != 1`` (even in case
- of sparse tensors).
-
- Args:
- input (Tensor): the input tensor.
-
- Examples::
-
- >>> torch.is_nonzero(torch.tensor([0.]))
- False
- >>> torch.is_nonzero(torch.tensor([1.5]))
- True
- >>> torch.is_nonzero(torch.tensor([False]))
- False
- >>> torch.is_nonzero(torch.tensor([3]))
- True
- >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
- Traceback (most recent call last):
- ...
- RuntimeError: bool value of Tensor with more than one value is ambiguous
- >>> torch.is_nonzero(torch.tensor([]))
- Traceback (most recent call last):
- ...
- RuntimeError: bool value of Tensor with no values is ambiguous
- """
- ...
- def is_same_size(input: Tensor, other: Tensor) -> _bool: ...
- def is_signed(input: Tensor) -> _bool: ...
- def is_vulkan_available() -> _bool: ...
- def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor:
- r"""
- isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
-
- Returns a new tensor with boolean elements representing if each element of
- :attr:`input` is "close" to the corresponding element of :attr:`other`.
- Closeness is defined as:
-
- .. math::
- \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
-
-
- where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
- and/or :attr:`other` are nonfinite they are close if and only if
- they are equal, with NaNs being considered equal to each other when
- :attr:`equal_nan` is True.
-
- Args:
- input (Tensor): first tensor to compare
- other (Tensor): second tensor to compare
- rtol (float, optional): relative tolerance. Default: 1e-05
- atol (float, optional): absolute tolerance. Default: 1e-08
- equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
-
- Examples::
-
- >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
- tensor([ True, False, False])
- >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
- tensor([True, True])
- """
- ...
- def isfinite(input: Tensor) -> Tensor:
- r"""
- isfinite(input) -> Tensor
-
- Returns a new tensor with boolean elements representing if each element is `finite` or not.
-
- Real values are finite when they are not NaN, negative infinity, or infinity.
- Complex values are finite when both their real and imaginary parts are finite.
-
- Args:
- input (Tensor): the input tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is finite and False elsewhere
-
- Example::
-
- >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
- tensor([True, False, True, False, False])
- """
- ...
- @overload
- def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
-
- Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
- a boolean tensor of the same shape as :attr:`elements` that is True for elements
- in :attr:`test_elements` and False otherwise.
-
- .. note::
- One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
-
- Args:
- elements (Tensor or Scalar): Input elements
- test_elements (Tensor or Scalar): Values against which to test for each input element
- assume_unique (bool, optional): If True, assumes both :attr:`elements` and
- :attr:`test_elements` contain unique elements, which can speed up the
- calculation. Default: False
- invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
- values for elements *not* in :attr:`test_elements`. Default: False
-
- Returns:
- A boolean tensor of the same shape as :attr:`elements` that is True for elements in
- :attr:`test_elements` and False otherwise
-
- Example::
-
- >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
- tensor([[False, True],
- [ True, False]])
- """
- ...
- @overload
- def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
-
- Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
- a boolean tensor of the same shape as :attr:`elements` that is True for elements
- in :attr:`test_elements` and False otherwise.
-
- .. note::
- One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
-
- Args:
- elements (Tensor or Scalar): Input elements
- test_elements (Tensor or Scalar): Values against which to test for each input element
- assume_unique (bool, optional): If True, assumes both :attr:`elements` and
- :attr:`test_elements` contain unique elements, which can speed up the
- calculation. Default: False
- invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
- values for elements *not* in :attr:`test_elements`. Default: False
-
- Returns:
- A boolean tensor of the same shape as :attr:`elements` that is True for elements in
- :attr:`test_elements` and False otherwise
-
- Example::
-
- >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
- tensor([[False, True],
- [ True, False]])
- """
- ...
- @overload
- def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
-
- Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
- a boolean tensor of the same shape as :attr:`elements` that is True for elements
- in :attr:`test_elements` and False otherwise.
-
- .. note::
- One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
-
- Args:
- elements (Tensor or Scalar): Input elements
- test_elements (Tensor or Scalar): Values against which to test for each input element
- assume_unique (bool, optional): If True, assumes both :attr:`elements` and
- :attr:`test_elements` contain unique elements, which can speed up the
- calculation. Default: False
- invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
- values for elements *not* in :attr:`test_elements`. Default: False
-
- Returns:
- A boolean tensor of the same shape as :attr:`elements` that is True for elements in
- :attr:`test_elements` and False otherwise
-
- Example::
-
- >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
- tensor([[False, True],
- [ True, False]])
- """
- ...
- def isinf(input: Tensor) -> Tensor:
- r"""
- isinf(input) -> Tensor
-
- Tests if each element of :attr:`input` is infinite
- (positive or negative infinity) or not.
-
- .. note::
- Complex values are infinite when their real or imaginary part is
- infinite.
-
- Args:
- input (Tensor): the input tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
-
- Example::
-
- >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
- tensor([False, True, False, True, False])
- """
- ...
- def isnan(input: Tensor) -> Tensor:
- r"""
- isnan(input) -> Tensor
-
- Returns a new tensor with boolean elements representing if each element of :attr:`input`
- is NaN or not. Complex values are considered NaN when their real
- and/or imaginary part is NaN.
-
- Arguments:
- input (Tensor): the input tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
-
- Example::
-
- >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
- tensor([False, True, False])
- """
- ...
- def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- isneginf(input, *, out=None) -> Tensor
-
- Tests if each element of :attr:`input` is negative infinity or not.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
- >>> torch.isneginf(a)
- tensor([ True, False, False])
- """
- ...
- def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- isposinf(input, *, out=None) -> Tensor
-
- Tests if each element of :attr:`input` is positive infinity or not.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
- >>> torch.isposinf(a)
- tensor([False, True, False])
- """
- ...
- def isreal(input: Tensor) -> Tensor:
- r"""
- isreal(input) -> Tensor
-
- Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
- All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
-
- Arguments:
- input (Tensor): the input tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is real and False elsewhere
-
- Example::
-
- >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
- tensor([True, False, True])
- """
- ...
- def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ...
- @overload
- def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
-
- Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
- ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
- where ``L`` is the :attr:`window_length`. This function computes:
-
- .. math::
- out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
-
- Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
- ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
- The :attr:`periodic` argument is intended as a helpful shorthand
- to produce a periodic window as input to functions like :func:`torch.stft`.
-
- .. note::
- If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
-
-
- Args:
- window_length (int): length of the window.
- periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
- If False, returns a symmetric window suitable for use in filter design.
- beta (float, optional): shape parameter for the window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
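-
- Example (illustrative; per the note above, the length-1 window is exactly one)::
-
- >>> torch.kaiser_window(1)
- tensor([1.])
- >>> torch.kaiser_window(5, periodic=False).shape
- torch.Size([5])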
- """
- ...
- @overload
- def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
-
- Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
- ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
- where ``L`` is the :attr:`window_length`. This function computes:
-
- .. math::
- out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
-
- Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
- ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
- The :attr:`periodic` argument is intended as a helpful shorthand
- to produce a periodic window as input to functions like :func:`torch.stft`.
-
- .. note::
- If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
-
-
- Args:
- window_length (int): length of the window.
- periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
- If False, returns a symmetric window suitable for use in filter design.
- beta (float, optional): shape parameter for the window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- """
- ...
- @overload
- def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
-
- Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
- ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
- where ``L`` is the :attr:`window_length`. This function computes:
-
- .. math::
- out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
-
- Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
- ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
- The :attr:`periodic` argument is intended as a helpful shorthand
- to produce a periodic window as input to functions like :func:`torch.stft`.
-
- .. note::
- If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
-
-
- Args:
- window_length (int): length of the window.
- periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
- If False, returns a symmetric window suitable for use in filter design.
- beta (float, optional): shape parameter for the window.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
- ``torch.strided`` (dense layout) is supported.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- """
- ...
- def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ...
- def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- kron(input, other, *, out=None) -> Tensor
-
- Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
-
- If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
- :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
- :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
-
- .. math::
- (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
- \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
-
- where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
- If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
-
- Supports real-valued and complex-valued inputs.
-
- .. note::
- This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
- as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
- :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
-
- .. math::
- \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
- a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
- \vdots & \ddots & \vdots \\
- a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
-
- where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
-
- Arguments:
- input (Tensor)
- other (Tensor)
-
- Keyword args:
- out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
-
- Examples::
-
- >>> mat1 = torch.eye(2)
- >>> mat2 = torch.ones(2, 2)
- >>> torch.kron(mat1, mat2)
- tensor([[1., 1., 0., 0.],
- [1., 1., 0., 0.],
- [0., 0., 1., 1.],
- [0., 0., 1., 1.]])
-
- >>> mat1 = torch.eye(2)
- >>> mat2 = torch.arange(1, 5).reshape(2, 2)
- >>> torch.kron(mat1, mat2)
- tensor([[1., 2., 0., 0.],
- [3., 4., 0., 0.],
- [0., 0., 1., 2.],
- [0., 0., 3., 4.]])
- """
- ...
- @overload
- def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue:
- r"""
- kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
- smallest element of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, and ``indices`` is the index location of each element found.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
- are the same size as :attr:`input`, except in the dimension :attr:`dim` where
- they are of size 1. Otherwise, :attr:`dim` is squeezed
- (see :func:`torch.squeeze`), resulting in both the :attr:`values` and
- :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
-
- .. note::
- When :attr:`input` is a CUDA tensor and there are multiple valid
- :attr:`k` th values, this function may nondeterministically return
- :attr:`indices` for any of them.
-
- Args:
- input (Tensor): the input tensor.
- k (int): k for the k-th smallest element
- dim (int, optional): the dimension to find the kth value along
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, LongTensor)
- can be optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.arange(1., 6.)
- >>> x
- tensor([ 1., 2., 3., 4., 5.])
- >>> torch.kthvalue(x, 4)
- torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
-
- >>> x=torch.arange(1.,7.).resize_(2,3)
- >>> x
- tensor([[ 1., 2., 3.],
- [ 4., 5., 6.]])
- >>> torch.kthvalue(x, 2, 0, True)
- torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
- """
- ...
- @overload
- def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue:
- r"""
- kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
- smallest element of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, and ``indices`` is the index location of each element found.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
- are the same size as :attr:`input`, except in the dimension :attr:`dim` where
- they are of size 1. Otherwise, :attr:`dim` is squeezed
- (see :func:`torch.squeeze`), resulting in both the :attr:`values` and
- :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
-
- .. note::
- When :attr:`input` is a CUDA tensor and there are multiple valid
- :attr:`k` th values, this function may nondeterministically return
- :attr:`indices` for any of them.
-
- Args:
- input (Tensor): the input tensor.
- k (int): k for the k-th smallest element
- dim (int, optional): the dimension to find the kth value along
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, LongTensor)
- can be optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.arange(1., 6.)
- >>> x
- tensor([ 1., 2., 3., 4., 5.])
- >>> torch.kthvalue(x, 4)
- torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
-
- >>> x=torch.arange(1.,7.).resize_(2,3)
- >>> x
- tensor([[ 1., 2., 3.],
- [ 4., 5., 6.]])
- >>> torch.kthvalue(x, 2, 0, True)
- torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
- """
- ...
- def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ...
- def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lcm(input, other, *, out=None) -> Tensor
-
- Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
-
- Both :attr:`input` and :attr:`other` must have integer types.
-
- .. note::
- This defines :math:`\text{lcm}(0, 0) = 0` and :math:`\text{lcm}(0, a) = 0`.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([5, 10, 15])
- >>> b = torch.tensor([3, 4, 5])
- >>> torch.lcm(a, b)
- tensor([15, 20, 15])
- >>> c = torch.tensor([3])
- >>> torch.lcm(a, c)
- tensor([15, 30, 15])
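- 
- An illustrative check of the zero convention from the note above (outputs shown
- with the default integer printing):
- 
- >>> z = torch.tensor([0, 0])
- >>> torch.lcm(z, torch.tensor([0, 5]))
- tensor([0, 0])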
- """
- ...
- def lcm_(input: Tensor, other: Tensor) -> Tensor: ...
- def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ldexp(input, other, *, out=None) -> Tensor
-
- Multiplies :attr:`input` by 2 ** :attr:`other`.
-
- .. math::
- \text{out}_i = \text{input}_i * 2^{\text{other}_i}
-
-
- Typically this function is used to construct floating point numbers by multiplying
- mantissas in :attr:`input` with integral powers of two created from the exponents
- in :attr:`other`.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): a tensor of exponents, typically integers.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
- tensor([2.])
- >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
- tensor([ 2., 4., 8., 16.])
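- 
- As a sketch of the mantissa/exponent use case, :func:`torch.ldexp` inverts
- :func:`torch.frexp` (assuming a build where ``torch.frexp`` is available):
- 
- >>> mantissa, exponent = torch.frexp(torch.tensor([10.0]))
- >>> torch.ldexp(mantissa, exponent)
- tensor([10.])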
- """
- ...
- def ldexp_(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- le(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} \leq \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or Scalar): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is less than or equal to
- :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[True, False], [True, True]])
- """
- ...
- @overload
- def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- le(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} \leq \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or Scalar): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is less than or equal to
- :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[True, False], [True, True]])
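- 
- An illustrative comparison against a scalar :attr:`other` (output spacing
- follows the default printer):
- 
- >>> torch.le(torch.tensor([1, 2, 3]), 2)
- tensor([ True,  True, False])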
- """
- ...
- @overload
- def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lerp(input, end, weight, *, out=None)
-
- Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
- on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
-
- .. math::
- \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
-
- The shapes of :attr:`start` and :attr:`end` must be
- :ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
- the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the tensor with the starting points
- end (Tensor): the tensor with the ending points
- weight (float or tensor): the weight for the interpolation formula
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> start = torch.arange(1., 5.)
- >>> end = torch.empty(4).fill_(10)
- >>> start
- tensor([ 1., 2., 3., 4.])
- >>> end
- tensor([ 10., 10., 10., 10.])
- >>> torch.lerp(start, end, 0.5)
- tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
- >>> torch.lerp(start, end, torch.full_like(start, 0.5))
- tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
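- 
- Weights outside ``[0, 1]`` extrapolate along the same line; an illustrative
- sketch using the ``start`` and ``end`` tensors above:
- 
- >>> torch.lerp(start, end, 2.0)
- tensor([19., 18., 17., 16.])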
- """
- ...
- @overload
- def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lerp(input, end, weight, *, out=None)
-
- Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
- on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
-
- .. math::
- \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
-
- The shapes of :attr:`start` and :attr:`end` must be
- :ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
- the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the tensor with the starting points
- end (Tensor): the tensor with the ending points
- weight (float or tensor): the weight for the interpolation formula
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> start = torch.arange(1., 5.)
- >>> end = torch.empty(4).fill_(10)
- >>> start
- tensor([ 1., 2., 3., 4.])
- >>> end
- tensor([ 10., 10., 10., 10.])
- >>> torch.lerp(start, end, 0.5)
- tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
- >>> torch.lerp(start, end, torch.full_like(start, 0.5))
- tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
- """
- ...
- @overload
- def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- less(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.lt`.
- """
- ...
- @overload
- def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- less(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.lt`.
- """
- ...
- @overload
- def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- less_equal(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.le`.
- """
- ...
- @overload
- def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- less_equal(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.le`.
- """
- ...
- def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lgamma(input, *, out=None) -> Tensor
-
- Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
-
- .. math::
- \text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.arange(0.5, 2, 0.5)
- >>> torch.lgamma(a)
- tensor([ 0.5724, 0.0000, -0.1208])
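- 
- Since :math:`\Gamma(0.5) = \sqrt{\pi}`, exponentiating the result recovers the
- gamma function itself (an illustrative check, output rounded by the printer):
- 
- >>> torch.lgamma(torch.tensor([0.5])).exp()
- tensor([1.7725])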
- """
- ...
- @overload
- def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
-
- .. math::
- (\text{start},
- \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \ldots,
- \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \text{end})
-
-
- From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
-
- Example::
-
- >>> torch.linspace(3, 10, steps=5)
- tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
- >>> torch.linspace(-10, 10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=1)
- tensor([-10.])
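- 
- An illustrative sketch of the :attr:`dtype` keyword (output formatting assumed):
- 
- >>> torch.linspace(0, 1, steps=5, dtype=torch.float64)
- tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000], dtype=torch.float64)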
- """
- ...
- @overload
- def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
-
- .. math::
- (\text{start},
- \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \ldots,
- \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \text{end})
-
-
- From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
-
- Example::
-
- >>> torch.linspace(3, 10, steps=5)
- tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
- >>> torch.linspace(-10, 10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=1)
- tensor([-10.])
- """
- ...
- @overload
- def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
-
- .. math::
- (\text{start},
- \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \ldots,
- \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \text{end})
-
-
- From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
-
- Example::
-
- >>> torch.linspace(3, 10, steps=5)
- tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
- >>> torch.linspace(-10, 10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=1)
- tensor([-10.])
- """
- ...
- @overload
- def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
-
- .. math::
- (\text{start},
- \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \ldots,
- \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \text{end})
-
-
- From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
-
- Example::
-
- >>> torch.linspace(3, 10, steps=5)
- tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
- >>> torch.linspace(-10, 10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=1)
- tensor([-10.])
- """
- ...
- @overload
- def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
-
- .. math::
- (\text{start},
- \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \ldots,
- \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
- \text{end})
-
-
- From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
-
- Example::
-
- >>> torch.linspace(3, 10, steps=5)
- tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
- >>> torch.linspace(-10, 10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=5)
- tensor([-10., -5., 0., 5., 10.])
- >>> torch.linspace(start=-10, end=10, steps=1)
- tensor([-10.])
- """
- ...
- def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- log(input, *, out=None) -> Tensor
-
- Returns a new tensor with the natural logarithm of the elements
- of :attr:`input`.
-
- .. math::
- y_{i} = \log_{e} (x_{i})
-
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(5) * 5
- >>> a
- tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
- >>> torch.log(a)
- tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
- """
- ...
- def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- log10(input, *, out=None) -> Tensor
-
- Returns a new tensor with the logarithm to the base 10 of the elements
- of :attr:`input`.
-
- .. math::
- y_{i} = \log_{10} (x_{i})
-
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(5)
- >>> a
- tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
-
-
- >>> torch.log10(a)
- tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
- """
- ...
- def log10_(input: Tensor) -> Tensor: ...
- def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- log1p(input, *, out=None) -> Tensor
-
- Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
-
- .. math::
- y_i = \log_{e} (x_i + 1)
-
- .. note:: This function is more accurate than :func:`torch.log` for small
- values of :attr:`input`
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(5)
- >>> a
- tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
- >>> torch.log1p(a)
- tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
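- 
- An illustrative sketch of the accuracy note above: in float32, ``1 + 1e-10``
- rounds to ``1.0``, so :func:`torch.log` loses the tiny increment while
- :func:`torch.log1p` preserves it:
- 
- >>> tiny = torch.tensor([1e-10])
- >>> torch.log(1 + tiny)
- tensor([0.])
- >>> torch.log1p(tiny)
- tensor([1.0000e-10])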
- """
- ...
- def log1p_(input: Tensor) -> Tensor: ...
- def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- log2(input, *, out=None) -> Tensor
-
- Returns a new tensor with the logarithm to the base 2 of the elements
- of :attr:`input`.
-
- .. math::
- y_{i} = \log_{2} (x_{i})
-
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.rand(5)
- >>> a
- tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
-
-
- >>> torch.log2(a)
- tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
- """
- ...
- def log2_(input: Tensor) -> Tensor: ...
- def log_(input: Tensor) -> Tensor: ...
- @overload
- def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ...
- def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logaddexp(input, other, *, out=None) -> Tensor
-
- Logarithm of the sum of exponentiations of the inputs.
-
- Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
- in statistics where the calculated probabilities of events may be so small that
- they underflow the range of normal floating point numbers. In such cases the logarithm
- of the calculated probability is stored. This function allows adding
- probabilities stored in such a fashion.
-
- This op should not be confused with :func:`torch.logsumexp`, which performs a
- reduction over a single tensor.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
- tensor([-0.3069, -0.6867, -0.8731])
- >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
- tensor([-1., -2., -3.])
- >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
- tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
- """
- ...
- def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logaddexp2(input, other, *, out=None) -> Tensor
-
- Logarithm of the sum of exponentiations of the inputs in base-2.
-
- Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
- :func:`torch.logaddexp` for more details.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
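- 
- Example (illustrative; the first line checks the base-2 identity
- :math:`\log_2(2^1 + 2^1) = 2`, with output formatting assumed)::
- 
- >>> torch.logaddexp2(torch.tensor([1.0]), torch.tensor([1.0]))
- tensor([2.])
- >>> torch.logaddexp2(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
- tensor([-1., -2., -3.])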
- """
- ...
- @overload
- def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logcumsumexp(input, dim, *, out=None) -> Tensor
- 
- Returns the logarithm of the cumulative summation of the exponentiation of
- elements of :attr:`input` in the dimension :attr:`dim`.
-
- For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
-
- .. math::
- \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij})
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(10)
- >>> torch.logcumsumexp(a, dim=0)
- tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
- 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
- """
- ...
- @overload
- def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logcumsumexp(input, dim, *, out=None) -> Tensor
- 
- Returns the logarithm of the cumulative summation of the exponentiation of
- elements of :attr:`input` in the dimension :attr:`dim`.
-
- For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
-
- .. math::
- \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij})
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to do the operation over
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(10)
- >>> torch.logcumsumexp(a, dim=0)
- tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
- 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
- """
- ...
- def logdet(input: Tensor) -> Tensor:
- r"""
- logdet(input) -> Tensor
-
- Calculates log determinant of a square matrix or batches of square matrices.
-
- It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
- a negative determinant.
-
- .. note::
- Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
- is not invertible. In this case, double backward through :meth:`logdet` will
- be unstable when :attr:`input` doesn't have distinct singular values. See
- :func:`torch.linalg.svd` for details.
-
- .. seealso::
-
- :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
- absolute value of the determinant of real-valued (resp. complex) square matrices.
-
- Arguments:
- input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
- batch dimensions.
-
- Example::
-
- >>> A = torch.randn(3, 3)
- >>> torch.det(A)
- tensor(0.2611)
- >>> torch.logdet(A)
- tensor(-1.3430)
- >>> A = torch.randn(3, 2, 2)
- >>> A
- tensor([[[ 0.9254, -0.6213],
- [-0.5787, 1.6843]],
-
- [[ 0.3242, -0.9665],
- [ 0.4539, -0.0887]],
-
- [[ 1.1336, -0.4025],
- [-0.7089, 0.9032]]])
- >>> A.det()
- tensor([1.1990, 0.4099, 0.7386])
- >>> A.det().log()
- tensor([ 0.1815, -0.8917, -0.3031])
- """
- ...
- def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logical_and(input, other, *, out=None) -> Tensor
-
- Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
- treated as ``True``.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the tensor to compute AND with
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
- tensor([ True, False, False])
- >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
- >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
- >>> torch.logical_and(a, b)
- tensor([False, False, True, False])
- >>> torch.logical_and(a.double(), b.double())
- tensor([False, False, True, False])
- >>> torch.logical_and(a.double(), b)
- tensor([False, False, True, False])
- >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
- tensor([False, False, True, False])
- """
- ...
- def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logical_not(input, *, out=None) -> Tensor
-
- Computes the element-wise logical NOT of the given input tensor. If :attr:`out` is not specified, the output tensor
- will have the bool dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.logical_not(torch.tensor([True, False]))
- tensor([False, True])
- >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
- tensor([ True, False, False])
- >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
- tensor([ True, False, False])
- >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
- tensor([1, 0, 0], dtype=torch.int16)
- """
- ...
- def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logical_or(input, other, *, out=None) -> Tensor
-
- Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
- treated as ``True``.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the tensor to compute OR with
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
- tensor([ True, False, True])
- >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
- >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
- >>> torch.logical_or(a, b)
- tensor([ True, True, True, False])
- >>> torch.logical_or(a.double(), b.double())
- tensor([ True, True, True, False])
- >>> torch.logical_or(a.double(), b)
- tensor([ True, True, True, False])
- >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
- tensor([ True, True, True, False])
- """
- ...
- def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logical_xor(input, other, *, out=None) -> Tensor
-
- Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
- treated as ``True``.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the tensor to compute XOR with
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
- tensor([False, False, True])
- >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
- >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
- >>> torch.logical_xor(a, b)
- tensor([ True, True, False, False])
- >>> torch.logical_xor(a.double(), b.double())
- tensor([ True, True, False, False])
- >>> torch.logical_xor(a.double(), b)
- tensor([ True, True, False, False])
- >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
- tensor([ True, True, False, False])
- """
- ...
- def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logit(input, eps=None, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.logit`.
- """
- ...
- def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ...
- @overload
- def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :math:`\text{base}^{\text{start}}` to
- :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
- with base :attr:`base`. That is, the values are:
-
- .. math::
- (\text{base}^{\text{start}},
- \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \ldots,
- \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \text{base}^{\text{end}})
-
-
-
- From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
- base (float, optional): base of the logarithm function. Default: ``10.0``.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.logspace(start=-10, end=10, steps=5)
- tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
- >>> torch.logspace(start=0.1, end=1.0, steps=5)
- tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
- >>> torch.logspace(start=0.1, end=1.0, steps=1)
- tensor([1.2589])
- >>> torch.logspace(start=2, end=2, steps=1, base=2)
- tensor([4.0])
- """
- ...
- @overload
- def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :math:`\text{base}^{\text{start}}` to
- :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
- with base :attr:`base`. That is, the values are:
-
- .. math::
- (\text{base}^{\text{start}},
- \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \ldots,
- \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \text{base}^{\text{end}})
-
-
-
- From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
- base (float, optional): base of the logarithm function. Default: ``10.0``.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.logspace(start=-10, end=10, steps=5)
- tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
- >>> torch.logspace(start=0.1, end=1.0, steps=5)
- tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
- >>> torch.logspace(start=0.1, end=1.0, steps=1)
- tensor([1.2589])
- >>> torch.logspace(start=2, end=2, steps=1, base=2)
- tensor([4.0])
- """
- ...
- @overload
- def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :math:`\text{base}^{\text{start}}` to
- :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
- with base :attr:`base`. That is, the values are:
-
- .. math::
- (\text{base}^{\text{start}},
- \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \ldots,
- \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \text{base}^{\text{end}})
-
-
-
- From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
- base (float, optional): base of the logarithm function. Default: ``10.0``.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.logspace(start=-10, end=10, steps=5)
- tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
- >>> torch.logspace(start=0.1, end=1.0, steps=5)
- tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
- >>> torch.logspace(start=0.1, end=1.0, steps=1)
- tensor([1.2589])
- >>> torch.logspace(start=2, end=2, steps=1, base=2)
- tensor([4.0])
- """
- ...
- @overload
- def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :math:`\text{base}^{\text{start}}` to
- :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
- with base :attr:`base`. That is, the values are:
-
- .. math::
- (\text{base}^{\text{start}},
- \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \ldots,
- \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \text{base}^{\text{end}})
-
-
-
- From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
- base (float, optional): base of the logarithm function. Default: ``10.0``.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.logspace(start=-10, end=10, steps=5)
- tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
- >>> torch.logspace(start=0.1, end=1.0, steps=5)
- tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
- >>> torch.logspace(start=0.1, end=1.0, steps=1)
- tensor([1.2589])
- >>> torch.logspace(start=2, end=2, steps=1, base=2)
- tensor([4.0])
- """
- ...
- @overload
- def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
-
- Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
- spaced from :math:`\text{base}^{\text{start}}` to
- :math:`\text{base}^{\text{end}}`, inclusive, on a logarithmic scale
- with base :attr:`base`. That is, the values are:
-
- .. math::
- (\text{base}^{\text{start}},
- \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \ldots,
- \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
- \text{base}^{\text{end}})
-
-
-
- From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
-
- Args:
- start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
- end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
- steps (int): size of the constructed tensor
- base (float, optional): base of the logarithm function. Default: ``10.0``.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (torch.dtype, optional): the data type to perform the computation in.
- Default: if None, uses the global default dtype (see torch.get_default_dtype())
- when both :attr:`start` and :attr:`end` are real,
- and corresponding complex dtype when either is complex.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.logspace(start=-10, end=10, steps=5)
- tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
- >>> torch.logspace(start=0.1, end=1.0, steps=5)
- tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
- >>> torch.logspace(start=0.1, end=1.0, steps=1)
- tensor([1.2589])
- >>> torch.logspace(start=2, end=2, steps=1, base=2)
- tensor([4.0])
- """
- ...
- @overload
- def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logsumexp(input, dim, keepdim=False, *, out=None)
-
- Returns the log of summed exponentials of each row of the :attr:`input`
- tensor in the given dimension :attr:`dim`. The computation is numerically
- stabilized.
-
- For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
-
- .. math::
- \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(3, 3)
- >>> torch.logsumexp(a, 1)
- tensor([1.4907, 1.0593, 1.5696])
- >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
- tensor(1.6859e-07)
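- 
- An illustrative sketch of the numerical stabilization: the naive
- ``log(sum(exp(x)))`` overflows for large inputs, while ``logsumexp`` does not
- (output formatting assumed):
- 
- >>> big = torch.tensor([[1000., 1000.]])
- >>> torch.log(torch.sum(torch.exp(big), 1))
- tensor([inf])
- >>> torch.logsumexp(big, 1)
- tensor([1000.6931])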
- """
- ...
- @overload
- def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- logsumexp(input, dim, keepdim=False, *, out=None)
-
- Returns the log of summed exponentials of each row of the :attr:`input`
- tensor in the given dimension :attr:`dim`. The computation is numerically
- stabilized.
-
- For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
-
- .. math::
- \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(3, 3)
- >>> torch.logsumexp(a, 1)
- tensor([1.4907, 1.0593, 1.5696])
- >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
- tensor(1.6859e-07)
- """
- ...
- @overload
- def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
- @overload
- def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ...
- def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
- @overload
- def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lt(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} < \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, False], [True, False]])
- """
- ...
- @overload
- def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lt(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} < \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, False], [True, False]])
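- 
- An illustrative comparison against a scalar :attr:`other` (output spacing
- follows the default printer):
- 
- >>> torch.lt(torch.tensor([1, 2, 3]), 2)
- tensor([ True, False, False])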
- """
- ...
- def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
-
- Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
- LU factorization of A from :func:`~linalg.lu_factor`.
-
- This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
-
- .. warning::
-
- :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
- :func:`torch.lu_solve` will be removed in a future PyTorch release.
- ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
-
- .. code:: python
-
- X = linalg.lu_solve(LU, pivots, B)
-
- Arguments:
- b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
- is zero or more batch dimensions.
- LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
- where :math:`*` is zero or more batch dimensions.
- LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
- where :math:`*` is zero or more batch dimensions.
- The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
- :attr:`LU_data`.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> A = torch.randn(2, 3, 3)
- >>> b = torch.randn(2, 3, 1)
- >>> LU, pivots = torch.linalg.lu_factor(A)
- >>> x = torch.lu_solve(b, LU, pivots)
- >>> torch.dist(A @ x, b)
- tensor(1.00000e-07 *
- 2.8312)
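- 
- As a sketch of the suggested migration, the deprecated call and
- :func:`torch.linalg.lu_solve` agree on the same factorization (note that the
- argument order differs between the two):
- 
- >>> torch.allclose(x, torch.linalg.lu_solve(LU, pivots, b))
- True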
- """
- ...
- def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack:
- r"""
- lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
-
- Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
-
- .. seealso::
-
- :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
- than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
-
- Args:
- LU_data (Tensor): the packed LU factorization data
- LU_pivots (Tensor): the packed LU factorization pivots
- unpack_data (bool): flag indicating if the data should be unpacked.
- If ``False``, then the returned ``L`` and ``U`` are empty tensors.
- Default: ``True``
- unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
- If ``False``, then the returned ``P`` is an empty tensor.
- Default: ``True``
-
- Keyword args:
- out (tuple, optional): output tuple of three tensors. Ignored if `None`.
-
- Returns:
- A namedtuple ``(P, L, U)``
-
- Examples::
-
- >>> A = torch.randn(2, 3, 3)
- >>> LU, pivots = torch.linalg.lu_factor(A)
- >>> P, L, U = torch.lu_unpack(LU, pivots)
- >>> # We can recover A from the factorization
- >>> A_ = P @ L @ U
- >>> torch.allclose(A, A_)
- True
-
- >>> # LU factorization of a rectangular matrix:
- >>> A = torch.randn(2, 3, 2)
- >>> LU, pivots = torch.linalg.lu_factor(A)
- >>> P, L, U = torch.lu_unpack(LU, pivots)
- >>> # P, L, U are the same as returned by linalg.lu
- >>> P_, L_, U_ = torch.linalg.lu(A)
- >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
- True
- """
- ...
- def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ...
- @overload
- def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ...
- @overload
- def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ...
- def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ...
- def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- masked_select(input, mask, *, out=None) -> Tensor
-
- Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
- the boolean mask :attr:`mask` which is a `BoolTensor`.
-
- The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
- to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
-
- .. note:: The returned tensor does **not** use the same storage
- as the original tensor
-
- Args:
- input (Tensor): the input tensor.
- mask (BoolTensor): the tensor containing the binary mask to index with
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> x
- tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
- [-1.2035, 1.2252, 0.5002, 0.6248],
- [ 0.1307, -2.0608, 0.1244, 2.0139]])
- >>> mask = x.ge(0.5)
- >>> mask
- tensor([[False, False, False, False],
- [False, True, True, True],
- [False, False, False, True]])
- >>> torch.masked_select(x, mask)
- tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
- """
- ...
- def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- matmul(input, other, *, out=None) -> Tensor
-
- Matrix product of two tensors.
-
- The behavior depends on the dimensionality of the tensors as follows:
-
- - If both tensors are 1-dimensional, the dot product (scalar) is returned.
- - If both arguments are 2-dimensional, the matrix-matrix product is returned.
- - If the first argument is 1-dimensional and the second argument is 2-dimensional,
- a 1 is prepended to its dimension for the purpose of the matrix multiply.
- After the matrix multiply, the prepended dimension is removed.
- - If the first argument is 2-dimensional and the second argument is 1-dimensional,
- the matrix-vector product is returned.
- - If both arguments are at least 1-dimensional and at least one argument is
- N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
- argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
- batched matrix multiply and removed after. If the second argument is 1-dimensional, a
- 1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
- The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
- must be broadcastable). For example, if :attr:`input` is a
- :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
- tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
-
- Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
- are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
- :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
- tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
- matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular, the
- matrix-matrix product (both arguments 2-dimensional) supports sparse arguments with the same restrictions
- as :func:`torch.mm`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- .. note::
-
- The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
-
- Arguments:
- input (Tensor): the first tensor to be multiplied
- other (Tensor): the second tensor to be multiplied
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> # vector x vector
- >>> tensor1 = torch.randn(3)
- >>> tensor2 = torch.randn(3)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([])
- >>> # matrix x vector
- >>> tensor1 = torch.randn(3, 4)
- >>> tensor2 = torch.randn(4)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([3])
- >>> # batched matrix x broadcasted vector
- >>> tensor1 = torch.randn(10, 3, 4)
- >>> tensor2 = torch.randn(4)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 3])
- >>> # batched matrix x batched matrix
- >>> tensor1 = torch.randn(10, 3, 4)
- >>> tensor2 = torch.randn(10, 4, 5)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 3, 5])
- >>> # batched matrix x broadcasted matrix
- >>> tensor1 = torch.randn(10, 3, 4)
- >>> tensor2 = torch.randn(4, 5)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 3, 5])
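- >>> # batch dims broadcast: (10, 1) with (2,) -> (10, 2); shapes chosen for illustration
- >>> tensor1 = torch.randn(10, 1, 3, 4)
- >>> tensor2 = torch.randn(2, 4, 5)
- >>> torch.matmul(tensor1, tensor2).size()
- torch.Size([10, 2, 3, 5])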
- """
- ...
- def matrix_exp(input: Tensor) -> Tensor:
- r"""
- matrix_exp(A) -> Tensor
-
- Alias for :func:`torch.linalg.matrix_exp`.
- """
- ...
- def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- matrix_power(input, n, *, out=None) -> Tensor
-
- Alias for :func:`torch.linalg.matrix_power`
- """
- ...
- @overload
- def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- max(input) -> Tensor
-
- Returns the maximum value of all elements in the ``input`` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``max(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6763, 0.7445, -2.2369]])
- >>> torch.max(a)
- tensor(0.7445)
-
- .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each maximum value found
- (argmax).
-
- If ``keepdim`` is ``True``, the output tensors are of the same size
- as ``input`` except in the dimension ``dim`` where they are of size 1.
- Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than ``input``.
-
- .. note:: If there are multiple maximal values in a reduced row then
- the indices of the first maximal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (max, max_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
- [ 1.1949, -1.1127, -2.2379, -0.6702],
- [ 1.5717, -0.9207, 0.1297, -1.8768],
- [-0.6172, 1.0036, -0.6060, -0.2432]])
- >>> torch.max(a, 1)
- torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
-
- .. function:: max(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.maximum`.
- """
- ...
- @overload
- def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- max(input) -> Tensor
-
- Returns the maximum value of all elements in the ``input`` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``max(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6763, 0.7445, -2.2369]])
- >>> torch.max(a)
- tensor(0.7445)
-
- .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each maximum value found
- (argmax).
-
- If ``keepdim`` is ``True``, the output tensors are of the same size
- as ``input`` except in the dimension ``dim`` where they are of size 1.
- Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than ``input``.
-
- .. note:: If there are multiple maximal values in a reduced row then
- the indices of the first maximal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (max, max_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
- [ 1.1949, -1.1127, -2.2379, -0.6702],
- [ 1.5717, -0.9207, 0.1297, -1.8768],
- [-0.6172, 1.0036, -0.6060, -0.2432]])
- >>> torch.max(a, 1)
- torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
-
- .. function:: max(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.maximum`.
- """
- ...
- @overload
- def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max:
- r"""
- max(input) -> Tensor
-
- Returns the maximum value of all elements in the ``input`` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``max(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6763, 0.7445, -2.2369]])
- >>> torch.max(a)
- tensor(0.7445)
-
- .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each maximum value found
- (argmax).
-
- If ``keepdim`` is ``True``, the output tensors are of the same size
- as ``input`` except in the dimension ``dim`` where they are of size 1.
- Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than ``input``.
-
- .. note:: If there are multiple maximal values in a reduced row then
- the indices of the first maximal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (max, max_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
- [ 1.1949, -1.1127, -2.2379, -0.6702],
- [ 1.5717, -0.9207, 0.1297, -1.8768],
- [-0.6172, 1.0036, -0.6060, -0.2432]])
- >>> torch.max(a, 1)
- torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
-
- .. function:: max(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.maximum`.
- """
- ...
- @overload
- def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max:
- r"""
- max(input) -> Tensor
-
- Returns the maximum value of all elements in the ``input`` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``max(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6763, 0.7445, -2.2369]])
- >>> torch.max(a)
- tensor(0.7445)
-
- .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each maximum value found
- (argmax).
-
- If ``keepdim`` is ``True``, the output tensors are of the same size
- as ``input`` except in the dimension ``dim`` where they are of size 1.
- Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than ``input``.
-
- .. note:: If there are multiple maximal values in a reduced row then
- the indices of the first maximal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``.
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (max, max_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
- [ 1.1949, -1.1127, -2.2379, -0.6702],
- [ 1.5717, -0.9207, 0.1297, -1.8768],
- [-0.6172, 1.0036, -0.6060, -0.2432]])
- >>> torch.max(a, 1)
- torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
-
- .. function:: max(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.maximum`.
- """
- ...
- def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
- def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ...
- def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
- def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
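- # A minimal usage sketch for the max_pool stubs above (shapes assumed purely
- # for illustration): a 2x2 window with stride 2 halves each spatial dimension
- # of an NCHW input.
- #   >>> x = torch.randn(1, 3, 32, 32)
- #   >>> torch.max_pool2d(x, kernel_size=2, stride=2).shape
- #   torch.Size([1, 3, 16, 16])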
- def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- maximum(input, other, *, out=None) -> Tensor
-
- Computes the element-wise maximum of :attr:`input` and :attr:`other`.
-
- .. note::
- If one of the elements being compared is a NaN, then that element is returned.
- :func:`maximum` is not supported for tensors with complex dtypes.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor((1, 2, -1))
- >>> b = torch.tensor((3, 0, 4))
- >>> torch.maximum(a, b)
- tensor([3, 2, 4])
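- >>> # per the note above, NaN propagates elementwise (values chosen for illustration)
- >>> torch.maximum(torch.tensor([1., float('nan')]), torch.tensor([3., 0.]))
- tensor([3., nan])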
- """
- ...
- @overload
- def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- mean(input, *, dtype=None) -> Tensor
-
- Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
-
- Args:
- input (Tensor):
- the input tensor, either of floating point or complex dtype
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.2294, -0.5481, 1.3288]])
- >>> torch.mean(a)
- tensor(0.3367)
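- >>> # dtype casts the input first, e.g. to average an integer tensor (illustrative)
- >>> torch.mean(torch.arange(6), dtype=torch.float32)
- tensor(2.5000)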
-
- .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
- :noindex:
-
- Returns the mean value of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- .. seealso::
-
- :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
- [-0.9644, 1.0131, -0.6549, -1.4279],
- [-0.2951, -1.3350, -0.7694, 0.5600],
- [ 1.0842, -0.9580, 0.3623, 0.2343]])
- >>> torch.mean(a, 1)
- tensor([-0.0163, -0.5085, -0.4599, 0.1807])
- >>> torch.mean(a, 1, True)
- tensor([[-0.0163],
- [-0.5085],
- [-0.4599],
- [ 0.1807]])
- """
- ...
- @overload
- def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mean(input, *, dtype=None) -> Tensor
-
- Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
-
- Args:
- input (Tensor):
- the input tensor, either of floating point or complex dtype
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.2294, -0.5481, 1.3288]])
- >>> torch.mean(a)
- tensor(0.3367)
-
- .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
- :noindex:
-
- Returns the mean value of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- .. seealso::
-
- :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
- [-0.9644, 1.0131, -0.6549, -1.4279],
- [-0.2951, -1.3350, -0.7694, 0.5600],
- [ 1.0842, -0.9580, 0.3623, 0.2343]])
- >>> torch.mean(a, 1)
- tensor([-0.0163, -0.5085, -0.4599, 0.1807])
- >>> torch.mean(a, 1, True)
- tensor([[-0.0163],
- [-0.5085],
- [-0.4599],
- [ 0.1807]])
- """
- ...
- @overload
- def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mean(input, *, dtype=None) -> Tensor
-
- Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
-
- Args:
- input (Tensor):
- the input tensor, either of floating point or complex dtype
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.2294, -0.5481, 1.3288]])
- >>> torch.mean(a)
- tensor(0.3367)
-
- .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
- :noindex:
-
- Returns the mean value of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- .. seealso::
-
- :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
- [-0.9644, 1.0131, -0.6549, -1.4279],
- [-0.2951, -1.3350, -0.7694, 0.5600],
- [ 1.0842, -0.9580, 0.3623, 0.2343]])
- >>> torch.mean(a, 1)
- tensor([-0.0163, -0.5085, -0.4599, 0.1807])
- >>> torch.mean(a, 1, True)
- tensor([[-0.0163],
- [-0.5085],
- [-0.4599],
- [ 0.1807]])
- """
- ...
- @overload
- def median(input: Tensor) -> Tensor:
- r"""
- median(input) -> Tensor
-
- Returns the median of the values in :attr:`input`.
-
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements. In this case the lower of the two medians is returned. To
- compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``median(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 1.5219, -1.5212, 0.2202]])
- >>> torch.median(a)
- tensor(0.2202)
-
- .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
-
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size
- as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements in the dimension :attr:`dim`. In this case the lower of the
- two medians is returned. To compute the mean of both medians in
- :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
-
- .. warning::
- ``indices`` does not necessarily contain the first occurrence of each
- median value found, unless it is unique.
- The exact implementation details are device-specific.
- Do not expect the same result when run on CPU and GPU in general.
- For the same reason do not expect the gradients to be deterministic.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
-
- Example::
-
- >>> a = torch.randn(4, 5)
- >>> a
- tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
- [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
- [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
- [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
- >>> torch.median(a, 1)
- torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
- """
- ...
- @overload
- def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median:
- r"""
- median(input) -> Tensor
-
- Returns the median of the values in :attr:`input`.
-
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements. In this case the lower of the two medians is returned. To
- compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``median(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 1.5219, -1.5212, 0.2202]])
- >>> torch.median(a)
- tensor(0.2202)
-
- .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
-
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size
- as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements in the dimension :attr:`dim`. In this case the lower of the
- two medians is returned. To compute the mean of both medians in
- :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
-
- .. warning::
- ``indices`` does not necessarily contain the first occurrence of each
- median value found, unless it is unique.
- The exact implementation details are device-specific.
- Do not expect the same result when run on CPU and GPU in general.
- For the same reason do not expect the gradients to be deterministic.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
-
- Example::
-
- >>> a = torch.randn(4, 5)
- >>> a
- tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
- [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
- [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
- [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
- >>> torch.median(a, 1)
- torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
- """
- ...
- @overload
- def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median:
- r"""
- median(input) -> Tensor
-
- Returns the median of the values in :attr:`input`.
-
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements. In this case the lower of the two medians is returned. To
- compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``median(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 1.5219, -1.5212, 0.2202]])
- >>> torch.median(a)
- tensor(0.2202)
-
- .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
-
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size
- as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note::
- The median is not unique for :attr:`input` tensors with an even number
- of elements in the dimension :attr:`dim`. In this case the lower of the
- two medians is returned. To compute the mean of both medians in
- :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
-
- .. warning::
- ``indices`` does not necessarily contain the first occurrence of each
- median value found, unless it is unique.
- The exact implementation details are device-specific.
- Do not expect the same result when run on CPU and GPU in general.
- For the same reason do not expect the gradients to be deterministic.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
-
- Example::
-
- >>> a = torch.randn(4, 5)
- >>> a
- tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
- [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
- [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
- [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
- >>> torch.median(a, 1)
- torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
- """
- ...
- @overload
- def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- min(input) -> Tensor
-
- Returns the minimum value of all elements in the :attr:`input` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``min(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6750, 1.0857, 1.7197]])
- >>> torch.min(a)
- tensor(0.6750)
-
- .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each minimum value found
- (argmin).
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note:: If there are multiple minimal values in a reduced row then
- the indices of the first minimal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the tuple of two output tensors (min, min_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
- [-1.4644, -0.2635, -0.3651, 0.6134],
- [ 0.2457, 0.0384, 1.0128, 0.7015],
- [-0.1153, 2.9849, 2.1458, 0.5788]])
- >>> torch.min(a, 1)
- torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
-
- .. function:: min(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.minimum`.
- """
- ...
- @overload
- def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- min(input) -> Tensor
-
- Returns the minimum value of all elements in the :attr:`input` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``min(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6750, 1.0857, 1.7197]])
- >>> torch.min(a)
- tensor(0.6750)
-
- .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each minimum value found
- (argmin).
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note:: If there are multiple minimal values in a reduced row then
- the indices of the first minimal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the tuple of two output tensors (min, min_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
- [-1.4644, -0.2635, -0.3651, 0.6134],
- [ 0.2457, 0.0384, 1.0128, 0.7015],
- [-0.1153, 2.9849, 2.1458, 0.5788]])
- >>> torch.min(a, 1)
- torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
-
- .. function:: min(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.minimum`.
- """
- ...
- @overload
- def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min:
- r"""
- min(input) -> Tensor
-
- Returns the minimum value of all elements in the :attr:`input` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``min(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6750, 1.0857, 1.7197]])
- >>> torch.min(a)
- tensor(0.6750)
-
- .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each minimum value found
- (argmin).
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note:: If there are multiple minimal values in a reduced row then
- the indices of the first minimal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the tuple of two output tensors (min, min_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
- [-1.4644, -0.2635, -0.3651, 0.6134],
- [ 0.2457, 0.0384, 1.0128, 0.7015],
- [-0.1153, 2.9849, 2.1458, 0.5788]])
- >>> torch.min(a, 1)
- torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
-
- .. function:: min(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.minimum`.
- """
- ...
- @overload
- def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min:
- r"""
- min(input) -> Tensor
-
- Returns the minimum value of all elements in the :attr:`input` tensor.
-
- .. warning::
- This function produces deterministic (sub)gradients unlike ``min(dim=0)``
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.6750, 1.0857, 1.7197]])
- >>> torch.min(a)
- tensor(0.6750)
-
- .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`. And ``indices`` is the index location of each minimum value found
- (argmin).
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note:: If there are multiple minimal values in a reduced row then
- the indices of the first minimal value are returned.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the tuple of two output tensors (min, min_indices)
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
- [-1.4644, -0.2635, -0.3651, 0.6134],
- [ 0.2457, 0.0384, 1.0128, 0.7015],
- [-0.1153, 2.9849, 2.1458, 0.5788]])
- >>> torch.min(a, 1)
- torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
-
- .. function:: min(input, other, *, out=None) -> Tensor
- :noindex:
-
- See :func:`torch.minimum`.
- """
- ...
- def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- minimum(input, other, *, out=None) -> Tensor
-
- Computes the element-wise minimum of :attr:`input` and :attr:`other`.
-
- .. note::
- If one of the elements being compared is a NaN, then that element is returned.
- :func:`minimum` is not supported for tensors with complex dtypes.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor((1, 2, -1))
- >>> b = torch.tensor((3, 0, 4))
- >>> torch.minimum(a, b)
- tensor([1, 0, -1])
- """
- ...
- def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
- def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
- def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ...
- def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
- def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ...
- def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
- def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ...
- def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
- def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
- def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
- def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mm(input, mat2, *, out=None) -> Tensor
-
- Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
-
- If :attr:`input` is a :math:`(n \times m)` tensor and :attr:`mat2` is a
- :math:`(m \times p)` tensor, then :attr:`out` will be a :math:`(n \times p)` tensor.
-
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
- For broadcasting matrix products, see :func:`torch.matmul`.
-
- Supports strided and sparse 2-D tensors as inputs, and autograd with
- respect to strided inputs.
-
- This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
- If :attr:`out` is provided its layout will be used. Otherwise, the result
- layout will be deduced from that of :attr:`input`.
-
-
- .. warning::
- Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
- or may not have autograd support. If you notice missing functionality please
- open a feature request.
-
- This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
-
- On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.
-
- Args:
- input (Tensor): the first matrix to be matrix multiplied
- mat2 (Tensor): the second matrix to be matrix multiplied
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> mat1 = torch.randn(2, 3)
- >>> mat2 = torch.randn(3, 3)
- >>> torch.mm(mat1, mat2)
- tensor([[ 0.4851, 0.5037, -0.3633],
- [-0.0760, -3.6705, 2.4784]])
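- >>> # sparse layouts are supported (beta); a COO sketch using the sizes above
- >>> torch.mm(mat1.to_sparse(), mat2).size()
- torch.Size([2, 3])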
- """
- ...
- @overload
- def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
- r"""
- mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, i.e. a value which appears most often
- in that row, and ``indices`` is the index location of each mode value found.
-
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
-
- Example::
-
- >>> b = torch.tensor(
- [[0, 0, 0, 2, 0, 0, 2],
- [0, 3, 0, 0, 2, 0, 1],
- [2, 2, 2, 0, 0, 0, 3],
- [2, 2, 3, 0, 1, 1, 0],
- [1, 1, 0, 0, 2, 0, 2]])
- >>> torch.mode(b, 0)
- torch.return_types.mode(
- values=tensor([0, 2, 0, 0, 0, 0, 2]),
- indices=tensor([1, 3, 4, 4, 2, 4, 4]))
- """
- ...
- @overload
- def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
- r"""
- mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
-
- Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
- value of each row of the :attr:`input` tensor in the given dimension
- :attr:`dim`, i.e. a value which appears most often
- in that row, and ``indices`` is the index location of each mode value found.
-
- By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
-
- If :attr:`keepdim` is ``True``, the output tensors are of the same size as
- :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
- in the output tensors having 1 fewer dimension than :attr:`input`.
-
- .. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out (tuple, optional): the result tuple of two output tensors (values, indices)
-
- Example::
-
- >>> b = torch.tensor(
- [[0, 0, 0, 2, 0, 0, 2],
- [0, 3, 0, 0, 2, 0, 1],
- [2, 2, 2, 0, 0, 0, 3],
- [2, 2, 3, 0, 1, 1, 0],
- [1, 1, 0, 0, 2, 0, 2]])
- >>> torch.mode(b, 0)
- torch.return_types.mode(
- values=tensor([0, 2, 0, 0, 0, 0, 2]),
- indices=tensor([1, 3, 4, 4, 2, 4, 4]))
- """
- ...
- @overload
- def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor:
- r"""
- moveaxis(input, source, destination) -> Tensor
-
- Alias for :func:`torch.movedim`.
-
- This function is equivalent to NumPy's moveaxis function.
-
- Examples::
-
- >>> t = torch.randn(3,2,1)
- >>> t
- tensor([[[-0.3362],
- [-0.8437]],
-
- [[-0.9627],
- [ 0.1727]],
-
- [[ 0.5173],
- [-0.1398]]])
- >>> torch.moveaxis(t, 1, 0).shape
- torch.Size([2, 3, 1])
- >>> torch.moveaxis(t, 1, 0)
- tensor([[[-0.3362],
- [-0.9627],
- [ 0.5173]],
-
- [[-0.8437],
- [ 0.1727],
- [-0.1398]]])
- >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
- torch.Size([2, 1, 3])
- >>> torch.moveaxis(t, (1, 2), (0, 1))
- tensor([[[-0.3362, -0.9627, 0.5173]],
-
- [[-0.8437, 0.1727, -0.1398]]])
- """
- ...
- @overload
- def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor:
- r"""
- moveaxis(input, source, destination) -> Tensor
-
- Alias for :func:`torch.movedim`.
-
- This function is equivalent to NumPy's moveaxis function.
-
- Examples::
-
- >>> t = torch.randn(3,2,1)
- >>> t
- tensor([[[-0.3362],
- [-0.8437]],
-
- [[-0.9627],
- [ 0.1727]],
-
- [[ 0.5173],
- [-0.1398]]])
- >>> torch.moveaxis(t, 1, 0).shape
- torch.Size([2, 3, 1])
- >>> torch.moveaxis(t, 1, 0)
- tensor([[[-0.3362],
- [-0.9627],
- [ 0.5173]],
-
- [[-0.8437],
- [ 0.1727],
- [-0.1398]]])
- >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
- torch.Size([2, 1, 3])
- >>> torch.moveaxis(t, (1, 2), (0, 1))
- tensor([[[-0.3362, -0.9627, 0.5173]],
-
- [[-0.8437, 0.1727, -0.1398]]])
- """
- ...
- @overload
- def movedim(input: Tensor, source: _int, destination: _int) -> Tensor:
- r"""
- movedim(input, source, destination) -> Tensor
-
- Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
- to the position(s) in :attr:`destination`.
-
- Other dimensions of :attr:`input` that are not explicitly moved remain in
- their original order and appear at the positions not specified in :attr:`destination`.
-
- Args:
- input (Tensor): the input tensor.
- source (int or tuple of ints): Original positions of the dims to move. These must be unique.
- destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
-
- Examples::
-
- >>> t = torch.randn(3,2,1)
- >>> t
- tensor([[[-0.3362],
- [-0.8437]],
-
- [[-0.9627],
- [ 0.1727]],
-
- [[ 0.5173],
- [-0.1398]]])
- >>> torch.movedim(t, 1, 0).shape
- torch.Size([2, 3, 1])
- >>> torch.movedim(t, 1, 0)
- tensor([[[-0.3362],
- [-0.9627],
- [ 0.5173]],
-
- [[-0.8437],
- [ 0.1727],
- [-0.1398]]])
- >>> torch.movedim(t, (1, 2), (0, 1)).shape
- torch.Size([2, 1, 3])
- >>> torch.movedim(t, (1, 2), (0, 1))
- tensor([[[-0.3362, -0.9627, 0.5173]],
-
- [[-0.8437, 0.1727, -0.1398]]])
- """
- ...
- @overload
- def movedim(input: Tensor, source: _size, destination: _size) -> Tensor:
- r"""
- movedim(input, source, destination) -> Tensor
-
- Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
- to the position(s) in :attr:`destination`.
-
- Other dimensions of :attr:`input` that are not explicitly moved remain in
- their original order and appear at the positions not specified in :attr:`destination`.
-
- Args:
- input (Tensor): the input tensor.
- source (int or tuple of ints): Original positions of the dims to move. These must be unique.
- destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
-
- Examples::
-
- >>> t = torch.randn(3,2,1)
- >>> t
- tensor([[[-0.3362],
- [-0.8437]],
-
- [[-0.9627],
- [ 0.1727]],
-
- [[ 0.5173],
- [-0.1398]]])
- >>> torch.movedim(t, 1, 0).shape
- torch.Size([2, 3, 1])
- >>> torch.movedim(t, 1, 0)
- tensor([[[-0.3362],
- [-0.9627],
- [ 0.5173]],
-
- [[-0.8437],
- [ 0.1727],
- [-0.1398]]])
- >>> torch.movedim(t, (1, 2), (0, 1)).shape
- torch.Size([2, 1, 3])
- >>> torch.movedim(t, (1, 2), (0, 1))
- tensor([[[-0.3362, -0.9627, 0.5173]],
-
- [[-0.8437, 0.1727, -0.1398]]])
- """
- ...
- def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- msort(input, *, out=None) -> Tensor
-
- Sorts the elements of the :attr:`input` tensor along its first dimension
- in ascending order by value.
-
- .. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
- See also :func:`torch.sort`.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.randn(3, 4)
- >>> t
- tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
- [-2.0527, -1.1250, 0.2275, 0.3077],
- [-0.0881, -0.1259, -0.5495, 1.0284]])
- >>> torch.msort(t)
- tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
- [-0.1321, -0.1259, -0.5495, 0.3077],
- [-0.0881, 0.4370, 0.2275, 1.0284]])
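- >>> # verify the equivalence stated in the note above
- >>> torch.equal(torch.msort(t), torch.sort(t, dim=0).values)
- True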
- """
- ...
- def mul(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mul(input, other, *, out=None) -> Tensor
-
- Multiplies :attr:`input` by :attr:`other`.
-
-
- .. math::
- \text{out}_i = \text{input}_i \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to multiply input by.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Examples::
-
- >>> a = torch.randn(3)
- >>> a
- tensor([ 0.2015, -0.4255, 2.6087])
- >>> torch.mul(a, 100)
- tensor([ 20.1494, -42.5491, 260.8663])
-
- >>> b = torch.randn(4, 1)
- >>> b
- tensor([[ 1.1207],
- [-0.3137],
- [ 0.0700],
- [ 0.8378]])
- >>> c = torch.randn(1, 4)
- >>> c
- tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
- >>> torch.mul(b, c)
- tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
- [-0.1614, -0.0382, 0.1645, -0.7021],
- [ 0.0360, 0.0085, -0.0367, 0.1567],
- [ 0.4312, 0.1019, -0.4394, 1.8753]])
- """
- ...
- def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
-
- Returns a tensor where each row contains :attr:`num_samples` indices sampled
- from the multinomial probability distribution (strictly speaking, the
- multivariate distribution; see torch.distributions.multinomial.Multinomial
- for details) located in the corresponding row of tensor :attr:`input`.
-
- .. note::
- The rows of :attr:`input` do not need to sum to one (in which case we use
- the values as weights), but must be non-negative, finite and have
- a non-zero sum.
-
- Indices are ordered from left to right according to when each was sampled
- (first samples are placed in first column).
-
- If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
-
- If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
- :math:`(m \times \text{num\_samples})`.
-
- If replacement is ``True``, samples are drawn with replacement.
-
- If not, they are drawn without replacement, which means that when a
- sample index is drawn for a row, it cannot be drawn again for that row.
-
- .. note::
- When drawn without replacement, :attr:`num_samples` must be lower than the
- number of non-zero elements in :attr:`input` (or the minimum number of non-zero
- elements in each row of :attr:`input` if it is a matrix).
-
- Args:
- input (Tensor): the input tensor containing probabilities
- num_samples (int): number of samples to draw
- replacement (bool, optional): whether to draw with replacement or not
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
- >>> torch.multinomial(weights, 2)
- tensor([1, 2])
- >>> torch.multinomial(weights, 5) # ERROR!
- RuntimeError: cannot sample n_sample > prob_dist.size(-1) samples without replacement
- >>> torch.multinomial(weights, 4, replacement=True)
- tensor([ 2, 1, 1, 1])
- """
- ...
- @overload
- def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- multiply(input, other, *, out=None)
-
- Alias for :func:`torch.mul`.
- """
- ...
- @overload
- def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor:
- r"""
- multiply(input, other, *, out=None)
-
- Alias for :func:`torch.mul`.
- """
- ...
- def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mv(input, vec, *, out=None) -> Tensor
-
- Performs a matrix-vector product of the matrix :attr:`input` and the vector
- :attr:`vec`.
-
- If :attr:`input` is a :math:`(n \times m)` tensor and :attr:`vec` is a 1-D tensor of
- size :math:`m`, then :attr:`out` will be 1-D of size :math:`n`.
-
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
-
- Args:
- input (Tensor): matrix to be multiplied
- vec (Tensor): vector to be multiplied
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> mat = torch.randn(2, 3)
- >>> vec = torch.randn(3)
- >>> torch.mv(mat, vec)
- tensor([ 1.0404, -0.6361])
- """
- ...
- def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- mvlgamma(input, p, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.multigammaln`.
- """
- ...
- def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
-
- Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
- with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
- By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
- greatest finite value representable by :attr:`input`'s dtype, and negative infinity
- is replaced with the least finite value representable by :attr:`input`'s dtype.
-
- Args:
- input (Tensor): the input tensor.
- nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
- posinf (Number, optional): if a Number, the value to replace positive infinity values with.
- If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
- Default is None.
- neginf (Number, optional): if a Number, the value to replace negative infinity values with.
- If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
- Default is None.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
- >>> torch.nan_to_num(x)
- tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
- >>> torch.nan_to_num(x, nan=2.0)
- tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
- >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
- tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00])
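- >>> # neginf replaces negative infinity analogously (replacement values illustrative)
- >>> torch.nan_to_num(x, nan=2.0, posinf=1.0, neginf=-1.0)
- tensor([ 2.0000,  1.0000, -1.0000,  3.1400])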
- """
- ...
- def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ...
- def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
-
- Computes the mean of all `non-NaN` elements along the specified dimensions.
-
- This function is identical to :func:`torch.mean` when there are no `NaN` values
- in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
- propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
- `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
- out (Tensor, optional): the output tensor.
-
- .. seealso::
-
- :func:`torch.mean` computes the mean value, propagating `NaN`.
-
- Example::
-
- >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
- >>> x.mean()
- tensor(nan)
- >>> x.nanmean()
- tensor(1.8000)
- >>> x.mean(dim=0)
- tensor([ nan, 1.5000, 2.5000])
- >>> x.nanmean(dim=0)
- tensor([1.0000, 1.5000, 2.5000])
-
- # If all elements in the reduced dimensions are NaN then the result is NaN
- >>> torch.tensor([torch.nan]).nanmean()
- tensor(nan)
- """
- ...
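- # Quick check of the equivalence stated above (illustrative sketch):
- #
- #     a = torch.tensor([1.0, float('nan'), 3.0])
- #     torch.nanmean(a)              # tensor(2.)
- #     torch.mean(a[~a.isnan()])     # tensor(2.), the same result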
- @overload
- def nanmedian(input: Tensor) -> Tensor:
- r"""
- nanmedian(input) -> Tensor
-
- Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
-
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
- When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
- while this function will return the median of the non-``NaN`` elements in :attr:`input`.
- If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.tensor([1, float('nan'), 3, 2])
- >>> a.median()
- tensor(nan)
- >>> a.nanmedian()
- tensor(2.)
-
- .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
- found in the dimension :attr:`dim`.
-
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
- one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
- median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
-
- Example::
-
- >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
- >>> a
- tensor([[2., 3., 1.],
- [nan, 1., nan]])
- >>> a.median(0)
- torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
- >>> a.nanmedian(0)
- torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
- """
- ...
- @overload
- def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian:
- r"""
- nanmedian(input) -> Tensor
-
- Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
-
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
- When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
- while this function will return the median of the non-``NaN`` elements in :attr:`input`.
- If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.tensor([1, float('nan'), 3, 2])
- >>> a.median()
- tensor(nan)
- >>> a.nanmedian()
- tensor(2.)
-
- .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
- found in the dimension :attr:`dim`.
-
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
- one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
- median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
-
- Example::
-
- >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
- >>> a
- tensor([[2., 3., 1.],
- [nan, 1., nan]])
- >>> a.median(0)
- torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
- >>> a.nanmedian(0)
- torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
- """
- ...
- @overload
- def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian:
- r"""
- nanmedian(input) -> Tensor
-
- Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
-
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
- When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
- while this function will return the median of the non-``NaN`` elements in :attr:`input`.
- If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.tensor([1, float('nan'), 3, 2])
- >>> a.median()
- tensor(nan)
- >>> a.nanmedian()
- tensor(2.)
-
- .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
- :noindex:
-
- Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
- in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
- found in the dimension :attr:`dim`.
-
- This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
- one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
- median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
- tensor, which must have dtype long, with their indices in the dimension
- :attr:`dim` of :attr:`input`.
-
- Example::
-
- >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
- >>> a
- tensor([[2., 3., 1.],
- [nan, 1., nan]])
- >>> a.median(0)
- torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
- >>> a.nanmedian(0)
- torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
- """
- ...
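- # Minimal sketch (assumed from the reduction semantics above): ``keepdim=True``
- # retains the reduced dimension with size 1, as for other reductions.
- #
- #     a = torch.tensor([[2., 3., 1.], [float('nan'), 1., float('nan')]])
- #     values, indices = a.nanmedian(0, keepdim=True)
- #     values.shape                  # torch.Size([1, 3])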
- @overload
- def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
- r"""
- nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
-
- This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
- computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
- not exist. If all values in a reduced row are ``NaN`` then the quantiles for
- that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
-
- Args:
- input (Tensor): the input tensor.
- q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword arguments:
- interpolation (str): interpolation method to use when the desired quantile lies between two data points.
- Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
- Default is ``linear``.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.tensor([float('nan'), 1, 2])
- >>> t.quantile(0.5)
- tensor(nan)
- >>> t.nanquantile(0.5)
- tensor(1.5000)
- >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
- >>> t
- tensor([[nan, nan],
- [1., 2.]])
- >>> t.nanquantile(0.5, dim=0)
- tensor([1., 2.])
- >>> t.nanquantile(0.5, dim=1)
- tensor([ nan, 1.5000])
- """
- ...
- @overload
- def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
- r"""
- nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
-
- This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
- computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
- not exist. If all values in a reduced row are ``NaN`` then the quantiles for
- that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
-
- Args:
- input (Tensor): the input tensor.
- q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword arguments:
- interpolation (str): interpolation method to use when the desired quantile lies between two data points.
- Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
- Default is ``linear``.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.tensor([float('nan'), 1, 2])
- >>> t.quantile(0.5)
- tensor(nan)
- >>> t.nanquantile(0.5)
- tensor(1.5000)
- >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
- >>> t
- tensor([[nan, nan],
- [1., 2.]])
- >>> t.nanquantile(0.5, dim=0)
- tensor([1., 2.])
- >>> t.nanquantile(0.5, dim=1)
- tensor([ nan, 1.5000])
- """
- ...
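- # Illustration of the ``interpolation`` keyword (values follow the standard
- # quantile definitions; sketch only):
- #
- #     t = torch.tensor([1., 2.])
- #     t.nanquantile(0.5)                          # tensor(1.5000), 'linear'
- #     t.nanquantile(0.5, interpolation='lower')   # tensor(1.)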
- def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- nansum(input, *, dtype=None) -> Tensor
-
- Returns the sum of all elements, treating Not-a-Number (NaN) values as zero.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.tensor([1., 2., float('nan'), 4.])
- >>> torch.nansum(a)
- tensor(7.)
-
- .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the sum of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`, treating Not-a-Number (NaN) values as zero.
- If :attr:`dim` is a list of dimensions, reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> torch.nansum(torch.tensor([1., float("nan")]))
- 1.0
- >>> a = torch.tensor([[1, 2], [3., float("nan")]])
- >>> torch.nansum(a)
- tensor(6.)
- >>> torch.nansum(a, dim=0)
- tensor([4., 2.])
- >>> torch.nansum(a, dim=1)
- tensor([3., 3.])
- """
- ...
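- # Sketch of the ``dtype`` keyword described above: accumulating a half-precision
- # tensor in float32 avoids overflow (illustrative values).
- #
- #     a = torch.full((10,), 60000.0, dtype=torch.float16)
- #     torch.nansum(a)                       # inf: 600000 overflows float16
- #     torch.nansum(a, dtype=torch.float32)  # tensor(600000.)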
- @overload
- def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor:
- r"""
- narrow(input, dim, start, length) -> Tensor
-
- Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
- dimension :attr:`dim` spans the elements from :attr:`start` up to, but not
- including, ``start + length``. The returned tensor and the :attr:`input` tensor
- share the same underlying storage.
-
- Args:
- input (Tensor): the tensor to narrow
- dim (int): the dimension along which to narrow
- start (int or Tensor): index of the element to start the narrowed dimension
- from. Can be negative, which means indexing from the end of `dim`. If a
- `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
- length (int): length of the narrowed dimension; must be non-negative
-
- Example::
-
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> torch.narrow(x, 0, 0, 2)
- tensor([[ 1, 2, 3],
- [ 4, 5, 6]])
- >>> torch.narrow(x, 1, 1, 2)
- tensor([[ 2, 3],
- [ 5, 6],
- [ 8, 9]])
- >>> torch.narrow(x, -1, torch.tensor(-1), 1)
- tensor([[3],
- [6],
- [9]])
- """
- ...
- @overload
- def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor:
- r"""
- narrow(input, dim, start, length) -> Tensor
-
- Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
- dimension :attr:`dim` spans the elements from :attr:`start` up to, but not
- including, ``start + length``. The returned tensor and the :attr:`input` tensor
- share the same underlying storage.
-
- Args:
- input (Tensor): the tensor to narrow
- dim (int): the dimension along which to narrow
- start (int or Tensor): index of the element to start the narrowed dimension
- from. Can be negative, which means indexing from the end of `dim`. If a
- `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
- length (int): length of the narrowed dimension; must be non-negative
-
- Example::
-
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> torch.narrow(x, 0, 0, 2)
- tensor([[ 1, 2, 3],
- [ 4, 5, 6]])
- >>> torch.narrow(x, 1, 1, 2)
- tensor([[ 2, 3],
- [ 5, 6],
- [ 8, 9]])
- >>> torch.narrow(x, -1, torch.tensor(-1), 1)
- tensor([[3],
- [6],
- [9]])
- """
- ...
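- # Because ``narrow`` returns a view, writes through the result are visible in
- # the source tensor (illustrative sketch):
- #
- #     x = torch.tensor([[1, 2, 3], [4, 5, 6]])
- #     y = torch.narrow(x, 0, 0, 1)
- #     y[0, 0] = 100
- #     x[0, 0]                       # tensor(100) -- storage is shared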
- def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- narrow_copy(input, dim, start, length, *, out=None) -> Tensor
-
- Same as :meth:`Tensor.narrow` except this returns a copy rather
- than shared storage. This is primarily for sparse tensors, which
- do not have a shared-storage narrow method.
-
- Args:
- input (Tensor): the tensor to narrow
- dim (int): the dimension along which to narrow
- start (int): index of the element to start the narrowed dimension from. Can
- be negative, which means indexing from the end of `dim`
- length (int): length of the narrowed dimension; must be non-negative
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- >>> torch.narrow_copy(x, 0, 0, 2)
- tensor([[ 1, 2, 3],
- [ 4, 5, 6]])
- >>> torch.narrow_copy(x, 1, 1, 2)
- tensor([[ 2, 3],
- [ 5, 6],
- [ 8, 9]])
- >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
- >>> torch.narrow_copy(s, 0, 0, 1)
- tensor(indices=tensor([[0, 0],
- [0, 1]]),
- values=tensor([[[0, 1],
- [2, 3]],
-
- [[4, 5],
- [6, 7]]]),
- size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)
-
- .. seealso::
-
- :func:`torch.narrow` for a non-copy variant
- """
- ...
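- # In contrast to :func:`torch.narrow`, the copy does not alias the input
- # (illustrative sketch):
- #
- #     x = torch.tensor([[1, 2, 3], [4, 5, 6]])
- #     y = torch.narrow_copy(x, 0, 0, 1)
- #     y[0, 0] = 100
- #     x[0, 0]                       # tensor(1) -- unchanged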
- def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ...
- def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ...
- def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ...
- def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ...
- @overload
- def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ...
- @overload
- def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ...
- @overload
- def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ne(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} \neq \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, True], [True, False]])
- """
- ...
- @overload
- def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ne(input, other, *, out=None) -> Tensor
-
- Computes :math:`\text{input} \neq \text{other}` element-wise.
-
-
- The second argument can be a number or a tensor whose shape is
- :ref:`broadcastable <broadcasting-semantics>` with the first argument.
-
- Args:
- input (Tensor): the tensor to compare
- other (Tensor or float): the tensor or value to compare
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
-
- Example::
-
- >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
- tensor([[False, True], [True, False]])
- """
- ...
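- # The scalar overload broadcasts the number against every element
- # (illustrative sketch):
- #
- #     torch.ne(torch.tensor([1, 2, 3]), 2)   # tensor([ True, False,  True])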
- def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- neg(input, *, out=None) -> Tensor
-
- Returns a new tensor with the negative of the elements of :attr:`input`.
-
- .. math::
- \text{out} = -1 \times \text{input}
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(5)
- >>> a
- tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
- >>> torch.neg(a)
- tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
- """
- ...
- def neg_(input: Tensor) -> Tensor: ...
- def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- negative(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.neg`
- """
- ...
- def negative_(input: Tensor) -> Tensor: ...
- def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- nextafter(input, other, *, out=None) -> Tensor
-
- Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
-
- The shapes of ``input`` and ``other`` must be
- :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the first input tensor
- other (Tensor): the second input tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> eps = torch.finfo(torch.float32).eps
- >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
- tensor([True, True])
- """
- ...
- @overload
- def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
-
- .. note::
- :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
- 2-D tensor where each row is the index for a nonzero value.
-
- :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
- index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
- gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
- contains nonzero indices for a certain dimension.
-
- See below for more details on the two behaviors.
-
- When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
- host-device synchronization.
-
- **When** :attr:`as_tuple` **is** ``False`` **(default)**:
-
- Returns a tensor containing the indices of all non-zero elements of
- :attr:`input`. Each row in the result contains the indices of a non-zero
- element in :attr:`input`. The result is sorted lexicographically, with
- the last index changing the fastest (C-style).
-
- If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
- :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
-
- **When** :attr:`as_tuple` **is** ``True``:
-
- Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
- each containing the indices (in that dimension) of all non-zero elements of
- :attr:`input` .
-
- If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
- tensors of size :math:`z`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
-
- As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
- value, it is treated as a one-dimensional tensor with one element.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (LongTensor, optional): the output tensor containing indices
-
- Returns:
- LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
- tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
- each dimension, containing the indices of each nonzero element along that
- dimension.
-
- Example::
-
- >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
- tensor([[ 0],
- [ 1],
- [ 2],
- [ 4]])
- >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
- ... [0.0, 0.4, 0.0, 0.0],
- ... [0.0, 0.0, 1.2, 0.0],
- ... [0.0, 0.0, 0.0,-0.4]]))
- tensor([[ 0, 0],
- [ 1, 1],
- [ 2, 2],
- [ 3, 3]])
- >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
- (tensor([0, 1, 2, 4]),)
- >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
- ... [0.0, 0.4, 0.0, 0.0],
- ... [0.0, 0.0, 1.2, 0.0],
- ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
- (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
- >>> torch.nonzero(torch.tensor(5), as_tuple=True)
- (tensor([0]),)
- """
- ...
- @overload
- def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]:
- r"""
- nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
-
- .. note::
- :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
- 2-D tensor where each row is the index for a nonzero value.
-
- :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
- index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
- gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
- contains nonzero indices for a certain dimension.
-
- See below for more details on the two behaviors.
-
- When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
- host-device synchronization.
-
- **When** :attr:`as_tuple` **is** ``False`` **(default)**:
-
- Returns a tensor containing the indices of all non-zero elements of
- :attr:`input`. Each row in the result contains the indices of a non-zero
- element in :attr:`input`. The result is sorted lexicographically, with
- the last index changing the fastest (C-style).
-
- If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
- :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
-
- **When** :attr:`as_tuple` **is** ``True``:
-
- Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
- each containing the indices (in that dimension) of all non-zero elements of
- :attr:`input` .
-
- If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
- tensors of size :math:`z`, where :math:`z` is the total number of
- non-zero elements in the :attr:`input` tensor.
-
- As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
- value, it is treated as a one-dimensional tensor with one element.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (LongTensor, optional): the output tensor containing indices
-
- Returns:
- LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
- tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
- each dimension, containing the indices of each nonzero element along that
- dimension.
-
- Example::
-
- >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
- tensor([[ 0],
- [ 1],
- [ 2],
- [ 4]])
- >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
- ... [0.0, 0.4, 0.0, 0.0],
- ... [0.0, 0.0, 1.2, 0.0],
- ... [0.0, 0.0, 0.0,-0.4]]))
- tensor([[ 0, 0],
- [ 1, 1],
- [ 2, 2],
- [ 3, 3]])
- >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
- (tensor([0, 1, 2, 4]),)
- >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
- ... [0.0, 0.4, 0.0, 0.0],
- ... [0.0, 0.0, 1.2, 0.0],
- ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
- (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
- >>> torch.nonzero(torch.tensor(5), as_tuple=True)
- (tensor([0]),)
- """
- ...
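- # The advanced-indexing idiom from the note above, spelled out (illustrative):
- #
- #     x = torch.tensor([[0.6, 0.0], [0.0, 0.4]])
- #     x[x.nonzero(as_tuple=True)]   # tensor([0.6000, 0.4000]), the nonzero values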
- def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ...
- def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ...
- @overload
- def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- normal(mean, std, *, generator=None, out=None) -> Tensor
-
- Returns a tensor of random numbers drawn from separate normal distributions
- whose mean and standard deviation are given.
-
- The :attr:`mean` is a tensor with the mean of
- each output element's normal distribution
-
- The :attr:`std` is a tensor with the standard deviation of
- each output element's normal distribution
-
- The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
- total number of elements in each tensor needs to be the same.
-
- .. note:: When the shapes do not match, the shape of :attr:`mean`
- is used as the shape for the returned output tensor.
-
- .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
- its device with the CPU.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
- tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
- 8.0505, 8.1408, 9.0563, 10.0566])
-
- .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means are shared among all drawn
- elements.
-
- Args:
- mean (float, optional): the mean for all distributions
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
- tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
-
- .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the standard deviations are shared among
- all drawn elements.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (float, optional): the standard deviation for all distributions
-
- Keyword args:
- out (Tensor, optional): the output tensor
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 6.))
- tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
-
- .. function:: normal(mean, std, size, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means and standard deviations are shared
- among all drawn elements. The resulting tensor has size given by :attr:`size`.
-
- Args:
- mean (float): the mean for all distributions
- std (float): the standard deviation for all distributions
- size (int...): a sequence of integers defining the shape of the output tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(2, 3, size=(1, 4))
- tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
- """
- ...
- @overload
- def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- normal(mean, std, *, generator=None, out=None) -> Tensor
-
- Returns a tensor of random numbers drawn from separate normal distributions
- whose mean and standard deviation are given.
-
- The :attr:`mean` is a tensor with the mean of
- each output element's normal distribution
-
- The :attr:`std` is a tensor with the standard deviation of
- each output element's normal distribution
-
- The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
- total number of elements in each tensor needs to be the same.
-
- .. note:: When the shapes do not match, the shape of :attr:`mean`
- is used as the shape for the returned output tensor.
-
- .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
- its device with the CPU.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
- tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
- 8.0505, 8.1408, 9.0563, 10.0566])
-
- .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means are shared among all drawn
- elements.
-
- Args:
- mean (float, optional): the mean for all distributions
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
- tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
-
- .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the standard deviations are shared among
- all drawn elements.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (float, optional): the standard deviation for all distributions
-
- Keyword args:
- out (Tensor, optional): the output tensor
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 6.))
- tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
-
- .. function:: normal(mean, std, size, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means and standard deviations are shared
- among all drawn elements. The resulting tensor has size given by :attr:`size`.
-
- Args:
- mean (float): the mean for all distributions
- std (float): the standard deviation for all distributions
- size (int...): a sequence of integers defining the shape of the output tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(2, 3, size=(1, 4))
- tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
- """
- ...
- @overload
- def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- normal(mean, std, *, generator=None, out=None) -> Tensor
-
- Returns a tensor of random numbers drawn from separate normal distributions
- whose mean and standard deviation are given.
-
- The :attr:`mean` is a tensor with the mean of
- each output element's normal distribution
-
- The :attr:`std` is a tensor with the standard deviation of
- each output element's normal distribution
-
- The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
- total number of elements in each tensor needs to be the same.
-
- .. note:: When the shapes do not match, the shape of :attr:`mean`
- is used as the shape for the returned output tensor.
-
- .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
- its device with the CPU.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
- tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
- 8.0505, 8.1408, 9.0563, 10.0566])
-
- .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means are shared among all drawn
- elements.
-
- Args:
- mean (float, optional): the mean for all distributions
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
- tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
-
- .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the standard deviations are shared among
- all drawn elements.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (float, optional): the standard deviation for all distributions
-
- Keyword args:
- out (Tensor, optional): the output tensor
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 6.))
- tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
-
- .. function:: normal(mean, std, size, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means and standard deviations are shared
- among all drawn elements. The resulting tensor has size given by :attr:`size`.
-
- Args:
- mean (float): the mean for all distributions
- std (float): the standard deviation for all distributions
- size (int...): a sequence of integers defining the shape of the output tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(2, 3, size=(1, 4))
- tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
- """
- ...
- @overload
- def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- normal(mean, std, *, generator=None, out=None) -> Tensor
-
- Returns a tensor of random numbers drawn from separate normal distributions
- whose mean and standard deviation are given.
-
- The :attr:`mean` is a tensor with the mean of
- each output element's normal distribution
-
- The :attr:`std` is a tensor with the standard deviation of
- each output element's normal distribution
-
- The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
- total number of elements in each tensor needs to be the same.
-
- .. note:: When the shapes do not match, the shape of :attr:`mean`
- is used as the shape for the returned output tensor.
-
- .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
- its device with the CPU.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
- tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
- 8.0505, 8.1408, 9.0563, 10.0566])
-
- .. function:: normal(mean=0.0, std, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means are shared among all drawn
- elements.
-
- Args:
- mean (float, optional): the mean for all distributions
- std (Tensor): the tensor of per-element standard deviations
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
- tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
-
- .. function:: normal(mean, std=1.0, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the standard deviations are shared among
- all drawn elements.
-
- Args:
- mean (Tensor): the tensor of per-element means
- std (float, optional): the standard deviation for all distributions
-
- Keyword args:
- out (Tensor, optional): the output tensor
-
- Example::
-
- >>> torch.normal(mean=torch.arange(1., 6.))
- tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
-
- .. function:: normal(mean, std, size, *, out=None) -> Tensor
- :noindex:
-
- Similar to the function above, but the means and standard deviations are shared
- among all drawn elements. The resulting tensor has size given by :attr:`size`.
-
- Args:
- mean (float): the mean for all distributions
- std (float): the standard deviation for all distributions
- size (int...): a sequence of integers defining the shape of the output tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.normal(2, 3, size=(1, 4))
- tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
- """
- ...
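- # Sketch of the shape rule in the note above: element counts must agree and the
- # output takes :attr:`mean`'s shape (illustrative):
- #
- #     mean = torch.zeros(2, 2)
- #     std = torch.ones(4)            # same numel, different shape
- #     torch.normal(mean, std).shape  # torch.Size([2, 2])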
- @overload
- def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- not_equal(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.ne`.
- """
- ...
- @overload
- def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- not_equal(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.ne`.
- """
- ...
- @overload
- def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ...
- def numel(self: Tensor) -> _int:
- r"""
- numel(input) -> int
-
- Returns the total number of elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> a = torch.randn(1, 2, 3, 4, 5)
- >>> torch.numel(a)
- 120
- >>> a = torch.zeros(4,4)
- >>> torch.numel(a)
- 16
- """
- ...
- @overload
- def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `1`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.ones(2, 3)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
-
- >>> torch.ones(5)
- tensor([ 1., 1., 1., 1., 1.])
- """
- ...
- @overload
- def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `1`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.ones(2, 3)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
-
- >>> torch.ones(5)
- tensor([ 1., 1., 1., 1., 1.])
- """
- ...
- @overload
- def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `1`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.ones(2, 3)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
-
- >>> torch.ones(5)
- tensor([ 1., 1., 1., 1., 1.])
- """
- ...
- @overload
- def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `1`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.ones(2, 3)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
-
- >>> torch.ones(5)
- tensor([ 1., 1., 1., 1., 1.])
- """
- ...
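- # The ``names`` overloads above also accept dimension names (named-tensor
- # prototype; illustrative sketch):
- #
- #     torch.ones(2, 3, names=('N', 'C'))
- #     # tensor([[1., 1., 1.],
- #     #         [1., 1., 1.]], names=('N', 'C'))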
- def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor filled with the scalar value `1`, with the same size as
- :attr:`input`. ``torch.ones_like(input)`` is equivalent to
- ``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- .. warning::
- As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
- the old ``torch.ones_like(input, out=output)`` is equivalent to
- ``torch.ones(input.size(), out=output)``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
-
- Keyword arguments:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- Example::
-
- >>> input = torch.empty(2, 3)
- >>> torch.ones_like(input)
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]])
- """
- ...
- def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- orgqr(input, tau) -> Tensor
-
- Alias for :func:`torch.linalg.householder_product`.
- """
- ...
- def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
-
- Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
-
- Multiplies an :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
- where `Q` is represented using Householder reflectors `(input, tau)`.
- See `Representation of Orthogonal or Unitary Matrices`_ for further details.
-
- If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
- When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
- It has size :math:`n \times n` otherwise.
- If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
-
- Supports inputs of float, double, cfloat and cdouble dtypes.
- Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
-
- .. seealso::
- :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
- from the QR decomposition.
-
- .. note::
- This function supports backward, but it is only fast when ``(input, tau)`` do not require gradients
- and/or ``tau.size(-1)`` is very small.
-
- Args:
- input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
- and `mn` equals `m` or `n` depending on :attr:`left`.
- tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
- other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
- left (bool): controls the order of multiplication.
- transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
-
- Keyword args:
- out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
-
- .. _Representation of Orthogonal or Unitary Matrices:
- https://www.netlib.org/lapack/lug/node128.html
- """
- ...
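- # Hedged usage sketch: pair :func:`torch.geqrf` with ``ormqr`` to apply the
- # implicit ``Q`` from a QR decomposition without materializing it.
- #
- #     a = torch.randn(4, 3)
- #     c = torch.randn(4, 2)
- #     h, tau = torch.geqrf(a)               # Householder representation of Q
- #     qc = torch.ormqr(h, tau, c)           # Q @ c, Q never formed explicitly
- #     qtc = torch.ormqr(h, tau, c, transpose=True)   # Q^T @ c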
- def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- outer(input, vec2, *, out=None) -> Tensor
-
- Outer product of :attr:`input` and :attr:`vec2`.
- If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
- size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
-
- .. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
-
- Args:
- input (Tensor): 1-D input vector
- vec2 (Tensor): 1-D input vector
-
- Keyword args:
- out (Tensor, optional): optional output matrix
-
- Example::
-
- >>> v1 = torch.arange(1., 5.)
- >>> v2 = torch.arange(1., 4.)
- >>> torch.outer(v1, v2)
- tensor([[ 1., 2., 3.],
- [ 2., 4., 6.],
- [ 3., 6., 9.],
- [ 4., 8., 12.]])
- """
- ...
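- # Equivalent broadcasting formulation (illustrative): the outer product is a
- # column vector times a row vector.
- #
- #     v1, v2 = torch.arange(1., 5.), torch.arange(1., 4.)
- #     torch.equal(torch.outer(v1, v2), v1.unsqueeze(1) * v2.unsqueeze(0))   # True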
- def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ...
- def pdist(input: Tensor, p: _float = 2) -> Tensor: ...
- def permute(input: Tensor, dims: _size) -> Tensor:
- r"""
- permute(input, dims) -> Tensor
-
- Returns a view of the original tensor :attr:`input` with its dimensions permuted.
-
- Args:
- input (Tensor): the input tensor.
- dims (tuple of int): The desired ordering of dimensions
-
- Example::
-
- >>> x = torch.randn(2, 3, 5)
- >>> x.size()
- torch.Size([2, 3, 5])
- >>> torch.permute(x, (2, 0, 1)).size()
- torch.Size([5, 2, 3])
- """
- ...
- def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.permute`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor:
- r"""
- pinverse(input, rcond=1e-15) -> Tensor
-
- Alias for :func:`torch.linalg.pinv`
- """
- ...
- def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ...
- def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ...
- def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor:
- r"""
- poisson(input, generator=None) -> Tensor
-
- Returns a tensor of the same size as :attr:`input` with each element
- sampled from a Poisson distribution with rate parameter given by the corresponding
- element in :attr:`input`, i.e.,
-
- .. math::
- \text{out}_i \sim \text{Poisson}(\text{input}_i)
-
- :attr:`input` must be non-negative.
-
- Args:
- input (Tensor): the input tensor containing the rates of the Poisson distribution
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
-
- Example::
-
- >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
- >>> torch.poisson(rates)
- tensor([[9., 1., 3., 5.],
- [8., 6., 6., 0.],
- [0., 4., 5., 3.],
- [2., 1., 4., 2.]])
- """
- ...
- def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ...
- def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- polar(abs, angle, *, out=None) -> Tensor
-
- Constructs a complex tensor whose elements are Cartesian coordinates
- corresponding to the polar coordinates with absolute value :attr:`abs` and angle
- :attr:`angle`.
-
- .. math::
- \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
-
- .. note::
- `torch.polar` is similar to
- `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
- and does not compute the polar decomposition
- of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
- The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
- infinite.
-
-
- Args:
- abs (Tensor): The absolute value of the complex tensor. Must be float or double.
- angle (Tensor): The angle of the complex tensor. Must be the same dtype as
- :attr:`abs`.
-
- Keyword args:
- out (Tensor): If the inputs are ``torch.float32``, must be
- ``torch.complex64``. If the inputs are ``torch.float64``, must be
- ``torch.complex128``.
-
- Example::
-
- >>> import numpy as np
- >>> abs = torch.tensor([1, 2], dtype=torch.float64)
- >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
- >>> z = torch.polar(abs, angle)
- >>> z
- tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
- """
- ...
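- # Round trip (illustrative): ``torch.abs`` and ``torch.angle`` recover the
- # polar coordinates used to build the tensor.
- #
- #     z = torch.polar(torch.tensor([2.0]), torch.tensor([0.5]))
- #     torch.abs(z), torch.angle(z)   # (tensor([2.]), tensor([0.5000]))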
- def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- polygamma(n, input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.polygamma`.
- """
- ...
- def positive(input: Tensor) -> Tensor:
- r"""
- positive(input) -> Tensor
-
- Returns :attr:`input`.
- Throws a runtime error if :attr:`input` is a bool tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> t = torch.randn(5)
- >>> t
- tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
- >>> torch.positive(t)
- tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
- """
- ...
- @overload
- def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- pow(input, exponent, *, out=None) -> Tensor
-
- Takes the power of each element in :attr:`input` with :attr:`exponent` and
- returns a tensor with the result.
-
- :attr:`exponent` can be either a single ``float`` number or a `Tensor`
- with the same number of elements as :attr:`input`.
-
- When :attr:`exponent` is a scalar value, the operation applied is:
-
- .. math::
- \text{out}_i = x_i ^ \text{exponent}
-
- When :attr:`exponent` is a tensor, the operation applied is:
-
- .. math::
- \text{out}_i = x_i ^ {\text{exponent}_i}
-
- When :attr:`exponent` is a tensor, the shapes of :attr:`input`
- and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the input tensor.
- exponent (float or tensor): the exponent value
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
- >>> torch.pow(a, 2)
- tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
- >>> exp = torch.arange(1., 5.)
-
- >>> a = torch.arange(1., 5.)
- >>> a
- tensor([ 1., 2., 3., 4.])
- >>> exp
- tensor([ 1., 2., 3., 4.])
- >>> torch.pow(a, exp)
- tensor([ 1., 4., 27., 256.])
-
- .. function:: pow(self, exponent, *, out=None) -> Tensor
- :noindex:
-
- :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
- The returned tensor :attr:`out` is of the same shape as :attr:`exponent`
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{self} ^ {\text{exponent}_i}
-
- Args:
- self (float): the scalar base value for the power operation
- exponent (Tensor): the exponent tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> exp = torch.arange(1., 5.)
- >>> base = 2
- >>> torch.pow(base, exp)
- tensor([ 2., 4., 8., 16.])
- """
- ...
- @overload
- def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- pow(input, exponent, *, out=None) -> Tensor
-
- Takes the power of each element in :attr:`input` with :attr:`exponent` and
- returns a tensor with the result.
-
- :attr:`exponent` can be either a single ``float`` number or a `Tensor`
- with the same number of elements as :attr:`input`.
-
- When :attr:`exponent` is a scalar value, the operation applied is:
-
- .. math::
- \text{out}_i = x_i ^ \text{exponent}
-
- When :attr:`exponent` is a tensor, the operation applied is:
-
- .. math::
- \text{out}_i = x_i ^ {\text{exponent}_i}
-
- When :attr:`exponent` is a tensor, the shapes of :attr:`input`
- and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the input tensor.
- exponent (float or tensor): the exponent value
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
- >>> torch.pow(a, 2)
- tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
- >>> exp = torch.arange(1., 5.)
-
- >>> a = torch.arange(1., 5.)
- >>> a
- tensor([ 1., 2., 3., 4.])
- >>> exp
- tensor([ 1., 2., 3., 4.])
- >>> torch.pow(a, exp)
- tensor([ 1., 4., 27., 256.])
-
- .. function:: pow(self, exponent, *, out=None) -> Tensor
- :noindex:
-
- :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
- The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{self} ^ {\text{exponent}_i}
-
- Args:
- self (float): the scalar base value for the power operation
- exponent (Tensor): the exponent tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> exp = torch.arange(1., 5.)
- >>> base = 2
- >>> torch.pow(base, exp)
- tensor([ 2., 4., 8., 16.])
- """
- ...
- @overload
- def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- pow(input, exponent, *, out=None) -> Tensor
-
- Takes the power of each element in :attr:`input` with :attr:`exponent` and
- returns a tensor with the result.
-
- :attr:`exponent` can be either a single ``float`` number or a `Tensor`
- with the same number of elements as :attr:`input`.
-
- When :attr:`exponent` is a scalar value, the operation applied is:
-
- .. math::
- \text{out}_i = x_i ^ \text{exponent}
-
- When :attr:`exponent` is a tensor, the operation applied is:
-
- .. math::
- \text{out}_i = x_i ^ {\text{exponent}_i}
-
- When :attr:`exponent` is a tensor, the shapes of :attr:`input`
- and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Args:
- input (Tensor): the input tensor.
- exponent (float or tensor): the exponent value
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
- >>> torch.pow(a, 2)
- tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
- >>> exp = torch.arange(1., 5.)
-
- >>> a = torch.arange(1., 5.)
- >>> a
- tensor([ 1., 2., 3., 4.])
- >>> exp
- tensor([ 1., 2., 3., 4.])
- >>> torch.pow(a, exp)
- tensor([ 1., 4., 27., 256.])
-
- .. function:: pow(self, exponent, *, out=None) -> Tensor
- :noindex:
-
- :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
- The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
-
- The operation applied is:
-
- .. math::
- \text{out}_i = \text{self} ^ {\text{exponent}_i}
-
- Args:
- self (float): the scalar base value for the power operation
- exponent (Tensor): the exponent tensor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> exp = torch.arange(1., 5.)
- >>> base = 2
- >>> torch.pow(base, exp)
- tensor([ 2., 4., 8., 16.])
- """
- ...
- def prelu(input: Tensor, weight: Tensor) -> Tensor: ...
- @overload
- def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- prod(input, *, dtype=None) -> Tensor
-
- Returns the product of all elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[-0.8020, 0.5428, -1.5854]])
- >>> torch.prod(a)
- tensor(0.6902)
-
- .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the product of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`.
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensor having 1 fewer dimension than :attr:`input`.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(4, 2)
- >>> a
- tensor([[ 0.5261, -0.3837],
- [ 1.1857, -0.2498],
- [-1.1646, 0.0705],
- [ 1.1131, -1.0629]])
- >>> torch.prod(a, 1)
- tensor([-0.2018, -0.2962, -0.0821, -1.1831])
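-
- With ``keepdim=True`` the reduced dimension is kept with size 1 (an
- illustrative sketch on the same input)::
-
- >>> torch.prod(a, 1, keepdim=True).shape
- torch.Size([4, 1])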
- """
- ...
- @overload
- def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- prod(input, *, dtype=None) -> Tensor
-
- Returns the product of all elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[-0.8020, 0.5428, -1.5854]])
- >>> torch.prod(a)
- tensor(0.6902)
-
- .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the product of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`.
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensor having 1 fewer dimension than :attr:`input`.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(4, 2)
- >>> a
- tensor([[ 0.5261, -0.3837],
- [ 1.1857, -0.2498],
- [-1.1646, 0.0705],
- [ 1.1131, -1.0629]])
- >>> torch.prod(a, 1)
- tensor([-0.2018, -0.2962, -0.0821, -1.1831])
- """
- ...
- @overload
- def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- prod(input, *, dtype=None) -> Tensor
-
- Returns the product of all elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[-0.8020, 0.5428, -1.5854]])
- >>> torch.prod(a)
- tensor(0.6902)
-
- .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the product of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`.
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
- the output tensor having 1 fewer dimension than :attr:`input`.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(4, 2)
- >>> a
- tensor([[ 0.5261, -0.3837],
- [ 1.1857, -0.2498],
- [-1.1646, 0.0705],
- [ 1.1131, -1.0629]])
- >>> torch.prod(a, 1)
- tensor([-0.2018, -0.2962, -0.0821, -1.1831])
- """
- ...
- def promote_types(type1: _dtype, type2: _dtype) -> _dtype:
- r"""
- promote_types(type1, type2) -> dtype
-
- Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
- neither smaller nor of lower kind than either `type1` or `type2`. See type promotion
- :ref:`documentation <type-promotion-doc>` for more information on the type
- promotion logic.
-
- Args:
- type1 (:class:`torch.dtype`)
- type2 (:class:`torch.dtype`)
-
- Example::
-
- >>> torch.promote_types(torch.int32, torch.float32)
- torch.float32
- >>> torch.promote_types(torch.uint8, torch.long)
- torch.long
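-
- Promotion also crosses scalar kinds: a complex dtype and a floating point dtype
- promote to a complex dtype wide enough for both (an illustrative check)::
-
- >>> torch.promote_types(torch.complex64, torch.float64)
- torch.complex128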
- """
- ...
- def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ...
- def q_per_channel_axis(input: Tensor) -> _int: ...
- def q_per_channel_scales(input: Tensor) -> Tensor: ...
- def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
- def q_scale(input: Tensor) -> _float: ...
- def q_zero_point(input: Tensor) -> _int: ...
- def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr:
- r"""
- qr(input, some=True, *, out=None) -> (Tensor, Tensor)
-
- Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
- and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
- with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
- :math:`R` being an upper triangular matrix or batch of upper triangular matrices.
-
- If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
- Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
-
- .. warning::
-
- :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
- and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
- replaced with a string parameter :attr:`mode`.
-
- ``Q, R = torch.qr(A)`` should be replaced with
-
- .. code:: python
-
- Q, R = torch.linalg.qr(A)
-
- ``Q, R = torch.qr(A, some=False)`` should be replaced with
-
- .. code:: python
-
- Q, R = torch.linalg.qr(A, mode="complete")
-
- .. warning::
- If you plan to backpropagate through QR, note that the current backward implementation
- is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
- columns of :attr:`input` are linearly independent.
- This behavior will probably change once QR supports pivoting.
-
- .. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
- and may produce different (valid) decompositions on different device types
- or different platforms.
-
- Args:
- input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
- batch dimensions consisting of matrices of dimension :math:`m \times n`.
- some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
- complete QR decomposition. If `k = min(m, n)` then:
-
- * ``some=True``: returns `(Q, R)` with dimensions (m, k), (k, n) (default)
-
- * ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
-
- Keyword args:
- out (tuple, optional): tuple of `Q` and `R` tensors.
- The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
-
- Example::
-
- >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
- >>> q, r = torch.qr(a)
- >>> q
- tensor([[-0.8571, 0.3943, 0.3314],
- [-0.4286, -0.9029, -0.0343],
- [ 0.2857, -0.1714, 0.9429]])
- >>> r
- tensor([[ -14.0000, -21.0000, 14.0000],
- [ 0.0000, -175.0000, 70.0000],
- [ 0.0000, 0.0000, -35.0000]])
- >>> torch.mm(q, r).round()
- tensor([[ 12., -51., 4.],
- [ 6., 167., -68.],
- [ -4., 24., -41.]])
- >>> torch.mm(q.t(), q).round()
- tensor([[ 1., 0., 0.],
- [ 0., 1., -0.],
- [ 0., -0., 1.]])
- >>> a = torch.randn(3, 4, 5)
- >>> q, r = torch.qr(a, some=False)
- >>> torch.allclose(torch.matmul(q, r), a)
- True
- >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
- True
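-
- As the warning above suggests, :func:`torch.linalg.qr` should produce the same
- factorization (an illustrative check against the first example)::
-
- >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
- >>> q1, r1 = torch.qr(a)
- >>> q2, r2 = torch.linalg.qr(a)
- >>> torch.allclose(q1, q2) and torch.allclose(r1, r2)
- True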
- """
- ...
- @overload
- def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
- r"""
- quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
-
- Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
-
- To compute the quantile, we map q in [0, 1] to the range of indices [0, n - 1] to find the location
- of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
- indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
- :attr:`interpolation` method as follows:
-
- - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- - ``lower``: ``a``.
- - ``higher``: ``b``.
- - ``nearest``: ``a`` or ``b``, whichever index is closer to the computed quantile index (rounding down for .5 fractions).
- - ``midpoint``: ``(a + b) / 2``.
-
- If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
- equal to the size of :attr:`q`, and the remaining dimensions are what remain after the reduction.
-
- .. note::
- By default :attr:`dim` is ``None``, resulting in the :attr:`input` tensor being flattened before computation.
-
- Args:
- input (Tensor): the input tensor.
- q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword arguments:
- interpolation (str): interpolation method to use when the desired quantile lies between two data points.
- Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
- Default is ``linear``.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(2, 3)
- >>> a
- tensor([[ 0.0795, -1.2117, 0.9765],
- [ 1.1707, 0.6706, 0.4884]])
- >>> q = torch.tensor([0.25, 0.5, 0.75])
- >>> torch.quantile(a, q, dim=1, keepdim=True)
- tensor([[[-0.5661],
- [ 0.5795]],
-
- [[ 0.0795],
- [ 0.6706]],
-
- [[ 0.5280],
- [ 0.9206]]])
- >>> torch.quantile(a, q, dim=1, keepdim=True).shape
- torch.Size([3, 2, 1])
- >>> a = torch.arange(4.)
- >>> a
- tensor([0., 1., 2., 3.])
- >>> torch.quantile(a, 0.6, interpolation='linear')
- tensor(1.8000)
- >>> torch.quantile(a, 0.6, interpolation='lower')
- tensor(1.)
- >>> torch.quantile(a, 0.6, interpolation='higher')
- tensor(2.)
- >>> torch.quantile(a, 0.6, interpolation='midpoint')
- tensor(1.5000)
- >>> torch.quantile(a, 0.6, interpolation='nearest')
- tensor(2.)
- >>> torch.quantile(a, 0.4, interpolation='nearest')
- tensor(1.)
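-
- The ``linear`` result above can be reproduced by hand (an illustrative check):
- ``q=0.6`` maps to index ``0.6 * (4 - 1) = 1.8``, so ``a=1.``, ``b=2.`` and
- ``fraction=0.8``::
-
- >>> 1. + (2. - 1.) * 0.8
- 1.8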
- """
- ...
- @overload
- def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor:
- r"""
- quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
-
- Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
-
- To compute the quantile, we map q in [0, 1] to the range of indices [0, n - 1] to find the location
- of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
- indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
- :attr:`interpolation` method as follows:
-
- - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- - ``lower``: ``a``.
- - ``higher``: ``b``.
- - ``nearest``: ``a`` or ``b``, whichever index is closer to the computed quantile index (rounding down for .5 fractions).
- - ``midpoint``: ``(a + b) / 2``.
-
- If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
- equal to the size of :attr:`q`, and the remaining dimensions are what remain after the reduction.
-
- .. note::
- By default :attr:`dim` is ``None``, resulting in the :attr:`input` tensor being flattened before computation.
-
- Args:
- input (Tensor): the input tensor.
- q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
- dim (int): the dimension to reduce.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword arguments:
- interpolation (str): interpolation method to use when the desired quantile lies between two data points.
- Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
- Default is ``linear``.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(2, 3)
- >>> a
- tensor([[ 0.0795, -1.2117, 0.9765],
- [ 1.1707, 0.6706, 0.4884]])
- >>> q = torch.tensor([0.25, 0.5, 0.75])
- >>> torch.quantile(a, q, dim=1, keepdim=True)
- tensor([[[-0.5661],
- [ 0.5795]],
-
- [[ 0.0795],
- [ 0.6706]],
-
- [[ 0.5280],
- [ 0.9206]]])
- >>> torch.quantile(a, q, dim=1, keepdim=True).shape
- torch.Size([3, 2, 1])
- >>> a = torch.arange(4.)
- >>> a
- tensor([0., 1., 2., 3.])
- >>> torch.quantile(a, 0.6, interpolation='linear')
- tensor(1.8000)
- >>> torch.quantile(a, 0.6, interpolation='lower')
- tensor(1.)
- >>> torch.quantile(a, 0.6, interpolation='higher')
- tensor(2.)
- >>> torch.quantile(a, 0.6, interpolation='midpoint')
- tensor(1.5000)
- >>> torch.quantile(a, 0.6, interpolation='nearest')
- tensor(2.)
- >>> torch.quantile(a, 0.4, interpolation='nearest')
- tensor(1.)
- """
- ...
- def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor:
- r"""
- quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
-
- Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
-
- Arguments:
- input (Tensor): float tensor to quantize
- scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
- zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
- axis (int): dimension on which to apply per-channel quantization
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
-
- Returns:
- Tensor: A newly quantized tensor
-
- Example::
-
- >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
- >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
- tensor([[-1., 0.],
- [ 1., 2.]], size=(2, 2), dtype=torch.quint8,
- quantization_scheme=torch.per_channel_affine,
- scale=tensor([0.1000, 0.0100], dtype=torch.float64),
- zero_point=tensor([10, 0]), axis=0)
- >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
- tensor([[ 0, 10],
- [100, 200]], dtype=torch.uint8)
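-
- Dequantizing recovers the input exactly here, since every value is representable
- at the given scales and zero points (an illustrative check)::
-
- >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).dequantize()
- tensor([[-1., 0.],
- [ 1., 2.]])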
- """
- ...
- @overload
- def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor:
- r"""
- quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
-
- Converts a float tensor to a quantized tensor with given scale and zero point.
-
- Arguments:
- input (Tensor): float tensor or list of tensors to quantize
- scale (float or Tensor): scale to apply in quantization formula
- zero_point (int or Tensor): offset in integer value that maps to float zero
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
-
- Returns:
- Tensor: A newly quantized tensor or list of quantized tensors.
-
- Example::
-
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
- tensor([ 0, 10, 20, 30], dtype=torch.uint8)
- >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
- ... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
- (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
- tensor([-2., 2.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
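-
- The integer representation above follows the affine formula
- ``q = round(x / scale) + zero_point`` (an illustrative check)::
-
- >>> torch.round(torch.tensor([-1.0, 0.0, 1.0, 2.0]) / 0.1) + 10
- tensor([ 0., 10., 20., 30.])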
- """
- ...
- @overload
- def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor:
- r"""
- quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
-
- Converts a float tensor to a quantized tensor with given scale and zero point.
-
- Arguments:
- input (Tensor): float tensor or list of tensors to quantize
- scale (float or Tensor): scale to apply in quantization formula
- zero_point (int or Tensor): offset in integer value that maps to float zero
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
-
- Returns:
- Tensor: A newly quantized tensor or list of quantized tensors.
-
- Example::
-
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
- tensor([ 0, 10, 20, 30], dtype=torch.uint8)
- >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
- ... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
- (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
- tensor([-2., 2.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
- """
- ...
- @overload
- def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> Tuple[Tensor, ...]:
- r"""
- quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
-
- Converts a float tensor to a quantized tensor with given scale and zero point.
-
- Arguments:
- input (Tensor): float tensor or list of tensors to quantize
- scale (float or Tensor): scale to apply in quantization formula
- zero_point (int or Tensor): offset in integer value that maps to float zero
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
-
- Returns:
- Tensor: A newly quantized tensor or list of quantized tensors.
-
- Example::
-
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
- tensor([ 0, 10, 20, 30], dtype=torch.uint8)
- >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
- ... torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
- (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
- tensor([-2., 2.], size=(2,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
- >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
- """
- ...
- def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor:
- r"""
- quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor
-
- Converts a float tensor to a quantized tensor with scale and zero_point calculated
- dynamically based on the input.
-
- Arguments:
- input (Tensor): float tensor or list of tensors to quantize
- dtype (:class:`torch.dtype`): the desired data type of returned tensor.
- Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
- reduce_range (bool): a flag to indicate whether to reduce the range of quantized
- data by 1 bit; this is required to avoid instruction overflow on some hardware
-
- Returns:
- Tensor: A newly (dynamically) quantized tensor
-
- Example::
-
- >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
- >>> print(t)
- tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
- zero_point=85)
- >>> t.int_repr()
- tensor([ 0, 85, 170, 255], dtype=torch.uint8)
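-
- The scale printed above comes from spreading the 256 ``quint8`` levels over the
- input range ``max - min = 3.0`` (an illustrative check)::
-
- >>> 3.0 / 255
- 0.011764705882352941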
- """
- ...
- def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor:
- r"""
- quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
-
- Applies batch normalization on a 4D (NCHW) quantized tensor.
-
- .. math::
-
- y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
-
- Arguments:
- input (Tensor): quantized tensor
- weight (Tensor): float tensor that corresponds to the gamma, size C
- bias (Tensor): float tensor that corresponds to the beta, size C
- mean (Tensor): float mean value in batch normalization, size C
- var (Tensor): float tensor for variance, size C
- eps (float): a value added to the denominator for numerical stability.
- output_scale (float): output quantized tensor scale
- output_zero_point (int): output quantized tensor zero_point
-
- Returns:
- Tensor: A quantized tensor with batch normalization applied.
-
- Example::
-
- >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
- >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
- tensor([[[[-0.2000, -0.2000],
- [ 1.6000, -0.2000]],
-
- [[-0.4000, -0.4000],
- [-0.4000, 0.6000]]],
-
-
- [[[-0.2000, -0.2000],
- [-0.2000, -0.2000]],
-
- [[ 0.6000, -0.4000],
- [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
- """
- ...
- def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
- def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ...
- def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor:
- r"""
- quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
-
- Applies a 1D max pooling over an input quantized tensor composed of several input planes.
-
- Arguments:
- input (Tensor): quantized tensor
- kernel_size (``list of int``): the size of the sliding window
- stride (``list of int``, optional): the stride of the sliding window
- padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
- dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
- ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
- Defaults to False.
-
-
- Returns:
- Tensor: A quantized tensor with max_pool1d applied.
-
- Example::
-
- >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
- >>> torch.quantized_max_pool1d(qx, [2])
- tensor([[0.0000],
- [1.5000]], size=(2, 1), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
- """
- ...
- def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor:
- r"""
- quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
-
- Applies a 2D max pooling over an input quantized tensor composed of several input planes.
-
- Arguments:
- input (Tensor): quantized tensor
- kernel_size (``list of int``): the size of the sliding window
- stride (``list of int``, optional): the stride of the sliding window
- padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
- dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
- ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
- Defaults to False.
-
-
- Returns:
- Tensor: A quantized tensor with max_pool2d applied.
-
- Example::
-
- >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
- >>> torch.quantized_max_pool2d(qx, [2,2])
- tensor([[[[1.5000]],
-
- [[1.5000]]],
-
-
- [[[0.0000]],
-
- [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
- quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
- """
- ...
- def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
- def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
- def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ...
- def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- rad2deg(input, *, out=None) -> Tensor
-
- Returns a new tensor with each of the elements of :attr:`input`
- converted from angles in radians to degrees.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword arguments:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
- >>> torch.rad2deg(a)
- tensor([[ 180.0233, -180.0233],
- [ 359.9894, -359.9894],
- [ 89.9544, -89.9544]])
- """
- ...
- def rad2deg_(input: Tensor) -> Tensor: ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
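-
- Passing a seeded :class:`torch.Generator` makes the sampling reproducible (an
- illustrative sketch)::
-
- >>> g = torch.Generator()
- >>> _ = g.manual_seed(42)
- >>> a = torch.rand(3, generator=g)
- >>> _ = g.manual_seed(42)
- >>> torch.equal(a, torch.rand(3, generator=g))
- True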
- """
- ...
- @overload
- def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- @overload
- def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- @overload
- def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- @overload
- def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- @overload
- def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a tensor filled with random numbers from a uniform distribution
- on the interval :math:`[0, 1)`.
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor will be allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.rand(4)
- tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
- >>> torch.rand(2, 3)
- tensor([[ 0.8237, 0.5781, 0.6879],
- [ 0.3816, 0.7249, 0.0998]])
- """
- ...
- def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor with the same size as :attr:`input` that is filled with
- random numbers from a uniform distribution on the interval :math:`[0, 1)`.
- ``torch.rand_like(input)`` is equivalent to
- ``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine the size of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
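-
- Example (an illustrative sketch; the values are random, so only the shape and
- dtype are checked)::
-
- >>> x = torch.empty(2, 3, dtype=torch.float64)
- >>> y = torch.rand_like(x)
- >>> (y.shape, y.dtype)
- (torch.Size([2, 3]), torch.float64)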
- """
- ...
- @overload
- def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
-
-
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
-
-
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
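-
- The default ``torch.int64`` dtype can be overridden with :attr:`dtype` (an
- illustrative sketch; only the dtype is checked since the values are random)::
-
- >>> torch.randint(10, (2, 2), dtype=torch.int32).dtype
- torch.int32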
- """
- ...
- @overload
- def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
-
-
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
-
-
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
- """
- ...
- @overload
- def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
-
-
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
-
-
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
- """
- ...
- @overload
- def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
-
-
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
-
-
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
- """
- ...
- @overload
- def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
-
-
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
-
-
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
- """
- ...
- @overload
- def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with random integers generated uniformly
- between :attr:`low` (inclusive) and :attr:`high` (exclusive).
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
- size (tuple): a tuple defining the shape of the output tensor.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
- this function returns a tensor with dtype ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.randint(3, 5, (3,))
- tensor([4, 3, 4])
-
-
- >>> torch.randint(10, (2, 2))
- tensor([[0, 2],
- [5, 5]])
-
-
- >>> torch.randint(3, 10, (2, 2))
- tensor([[4, 5],
- [6, 7]])
- """
- ...
- @overload
- def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor with the same shape as Tensor :attr:`input` filled with
- random integers generated uniformly between :attr:`low` (inclusive) and
- :attr:`high` (exclusive).
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
- @overload
- def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor with the same shape as Tensor :attr:`input` filled with
- random integers generated uniformly between :attr:`low` (inclusive) and
- :attr:`high` (exclusive).
-
- .. note::
- With the global dtype default (``torch.float32``), this function returns
- a tensor with dtype ``torch.int64``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
- low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
- high (int): One above the highest integer to be drawn from the distribution.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
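- # The randint_like docstrings above carry no Example section; a minimal sketch
- # (values are random, so only shape and dtype are checked):
- #
- #     >>> x = torch.zeros(2, 3, dtype=torch.int64)
- #     >>> torch.randint_like(x, 10).shape        # values drawn from [0, 10)
- #     torch.Size([2, 3])
- #     >>> torch.randint_like(x, 3, 8).dtype      # values drawn from [3, 8)
- #     torch.int64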
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- @overload
- def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
-
- Returns a tensor filled with random numbers from a normal distribution
- with mean `0` and variance `1` (also called the standard normal
- distribution).
-
- .. math::
- \text{out}_{i} \sim \mathcal{N}(0, 1)
-
- For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
- unit variance as
-
- .. math::
- \text{out}_{i} \sim \mathcal{CN}(0, 1)
-
- This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary
- :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as
-
- .. math::
- \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad
- \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2})
-
- The shape of the tensor is defined by the variable argument :attr:`size`.
-
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randn(4)
- tensor([-2.1436, 0.9966, 2.3426, -0.6366])
- >>> torch.randn(2, 3)
- tensor([[ 1.5954, 2.8929, -1.0923],
- [ 1.1719, -0.4709, -0.1996]])
-
- .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
- """
- ...
- def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor with the same size as :attr:`input` that is filled with
- random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
- sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
- ``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
- """
- ...
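- # randn_like has no Example section above; a minimal sketch:
- #
- #     >>> x = torch.empty(2, 3)
- #     >>> torch.randn_like(x).shape    # same shape, dtype, and device as x; values ~ N(0, 1)
- #     torch.Size([2, 3])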
- @overload
- def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a random permutation of integers from ``0`` to ``n - 1``.
-
- Args:
- n (int): the upper bound (exclusive)
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randperm(4)
- tensor([2, 1, 0, 3])
- """
- ...
- @overload
- def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Returns a random permutation of integers from ``0`` to ``n - 1``.
-
- Args:
- n (int): the upper bound (exclusive)
-
- Keyword args:
- generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: ``torch.int64``.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
- Example::
-
- >>> torch.randperm(4)
- tensor([2, 1, 0, 3])
- """
- ...
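- # The randperm docstrings above describe the ``generator`` keyword but never
- # demonstrate it; a minimal sketch of reproducible permutations:
- #
- #     >>> g = torch.Generator().manual_seed(0)
- #     >>> p1 = torch.randperm(4, generator=g)
- #     >>> _ = g.manual_seed(0)
- #     >>> p2 = torch.randperm(4, generator=g)
- #     >>> torch.equal(p1, p2)    # same seed, same permutation
- #     True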
- def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
- with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
- the gap between two values in the tensor.
-
- .. math::
- \text{out}_{i+1} = \text{out}_i + \text{step}.
-
- .. warning::
- This function is deprecated and will be removed in a future release because its behavior is inconsistent with
- Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
-
- Args:
- start (float): the starting value for the set of points. Default: ``0``.
- end (float): the ending value for the set of points
- step (float): the gap between each pair of adjacent points. Default: ``1``.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, the data type is inferred from the other input
- arguments. If any of `start`, `end`, or `step` are floating-point, the
- `dtype` is inferred to be the default dtype (see
- :meth:`~torch.get_default_dtype`); otherwise, the `dtype` is inferred to
- be `torch.int64`.
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.range(1, 4)
- tensor([ 1., 2., 3., 4.])
- >>> torch.range(1, 4, 0.5)
- tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
- """
- ...
- def ravel(input: Tensor) -> Tensor:
- r"""
- ravel(input) -> Tensor
-
- Return a contiguous flattened tensor. A copy is made only if needed.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> t = torch.tensor([[[1, 2],
- ... [3, 4]],
- ... [[5, 6],
- ... [7, 8]]])
- >>> torch.ravel(t)
- tensor([1, 2, 3, 4, 5, 6, 7, 8])
- """
- ...
- def real(input: Tensor) -> Tensor:
- r"""
- real(input) -> Tensor
-
- Returns a new tensor containing the real values of the :attr:`input` tensor.
- The returned tensor and :attr:`input` share the same underlying storage.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x = torch.randn(4, dtype=torch.cfloat)
- >>> x
- tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
- >>> x.real
- tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
- """
- ...
- def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- reciprocal(input, *, out=None) -> Tensor
-
- Returns a new tensor with the reciprocal of the elements of :attr:`input`
-
- .. math::
- \text{out}_{i} = \frac{1}{\text{input}_{i}}
-
- .. note::
- Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
- inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
- the default scalar type.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.4595, -2.1219, -1.4314, 0.7298])
- >>> torch.reciprocal(a)
- tensor([-2.1763, -0.4713, -0.6986, 1.3702])
- """
- ...
- def reciprocal_(input: Tensor) -> Tensor: ...
- def relu(input: Tensor) -> Tensor: ...
- def relu_(input: Tensor) -> Tensor: ...
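- # reciprocal_, relu, and relu_ carry no docstrings here. relu clamps negative
- # entries to zero; the trailing-underscore variants mutate their input in place.
- # A minimal sketch:
- #
- #     >>> torch.relu(torch.tensor([-1.0, 0.0, 2.0]))
- #     tensor([0., 0., 2.])
- #     >>> torch.reciprocal_(torch.tensor([2.0, 4.0]))    # in-place 1/x
- #     tensor([0.5000, 0.2500])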
- @overload
- def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- remainder(input, other, *, out=None) -> Tensor
-
- Computes
- `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
- entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
- is less than that of :attr:`other`.
-
- It may also be defined in terms of :func:`torch.div` as
-
- .. code:: python
-
- torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
-
- .. note::
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
- See :func:`torch.fmod` for how division by zero is handled.
-
- .. seealso::
-
- :func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
- This one is defined in terms of division rounding towards zero.
-
- Args:
- input (Tensor or Scalar): the dividend
- other (Tensor or Scalar): the divisor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([ 1., 0., 1., 1., 0., 1.])
- >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
- """
- ...
- @overload
- def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- remainder(input, other, *, out=None) -> Tensor
-
- Computes
- `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
- entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
- is less than that of :attr:`other`.
-
- It may also be defined in terms of :func:`torch.div` as
-
- .. code:: python
-
- torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
-
- .. note::
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
- See :func:`torch.fmod` for how division by zero is handled.
-
- .. seealso::
-
- :func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
- This one is defined in terms of division rounding towards zero.
-
- Args:
- input (Tensor or Scalar): the dividend
- other (Tensor or Scalar): the divisor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([ 1., 0., 1., 1., 0., 1.])
- >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
- """
- ...
- @overload
- def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- remainder(input, other, *, out=None) -> Tensor
-
- Computes
- `Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
- entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
- is less than that of :attr:`other`.
-
- It may also be defined in terms of :func:`torch.div` as
-
- .. code:: python
-
- torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
-
- .. note::
- Complex inputs are not supported. In some cases, it is not mathematically
- possible to satisfy the definition of a modulo operation with complex numbers.
- See :func:`torch.fmod` for how division by zero is handled.
-
- .. seealso::
-
- :func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
- This one is defined in terms of division rounding towards zero.
-
- Args:
- input (Tensor or Scalar): the dividend
- other (Tensor or Scalar): the divisor
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
- tensor([ 1., 0., 1., 1., 0., 1.])
- >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
- tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
- """
- ...
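- # A quick numeric check of the ``torch.div`` identity quoted in the remainder
- # docstrings above:
- #
- #     >>> a = torch.tensor([-3.0, -2.0, -1.0, 1.0, 2.0, 3.0])
- #     >>> b = torch.tensor(2.0)
- #     >>> torch.equal(torch.remainder(a, b), a - a.div(b, rounding_mode="floor") * b)
- #     True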
- def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
-
- Returns a tensor where each sub-tensor of :attr:`input` along dimension
- :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
- than the value :attr:`maxnorm`
-
- .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
-
- Args:
- input (Tensor): the input tensor.
- p (float): the power for the norm computation
- dim (int): the dimension to slice over to get the sub-tensors
- maxnorm (float): the maximum norm to keep each sub-tensor under
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.ones(3, 3)
- >>> x[1].fill_(2)
- tensor([ 2., 2., 2.])
- >>> x[2].fill_(3)
- tensor([ 3., 3., 3.])
- >>> x
- tensor([[ 1., 1., 1.],
- [ 2., 2., 2.],
- [ 3., 3., 3.]])
- >>> torch.renorm(x, 1, 0, 5)
- tensor([[ 1.0000, 1.0000, 1.0000],
- [ 1.6667, 1.6667, 1.6667],
- [ 1.6667, 1.6667, 1.6667]])
- """
- ...
- @overload
- def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
-
- Repeat elements of a tensor.
-
- .. warning::
-
- This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
-
- Args:
- input (Tensor): the input tensor.
- repeats (Tensor or int): The number of repetitions for each element.
- repeats is broadcasted to fit the shape of the given axis.
- dim (int, optional): The dimension along which to repeat values.
- By default, use the flattened input array, and return a flat output
- array.
-
- Keyword args:
- output_size (int, optional): Total output size for the given axis
- (e.g. the sum of repeats). If given, it avoids the stream synchronization
- needed to calculate the output shape of the tensor.
-
- Returns:
- Tensor: Repeated tensor which has the same shape as input, except along the given axis.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3])
- >>> x.repeat_interleave(2)
- tensor([1, 1, 2, 2, 3, 3])
- >>> y = torch.tensor([[1, 2], [3, 4]])
- >>> torch.repeat_interleave(y, 2)
- tensor([1, 1, 2, 2, 3, 3, 4, 4])
- >>> torch.repeat_interleave(y, 3, dim=1)
- tensor([[1, 1, 1, 2, 2, 2],
- [3, 3, 3, 4, 4, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
-
- If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
- `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
- `1` appears `n2` times, `2` appears `n3` times, etc.
-
- .. function:: repeat_interleave(repeats, *) -> Tensor
- :noindex:
-
- Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
-
- Args:
- repeats (Tensor): The number of repetitions for each element.
-
- Returns:
- Tensor: Repeated tensor of size `sum(repeats)`.
-
- Example::
-
- >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
- tensor([0, 1, 1, 2, 2, 2])
- """
- ...
- @overload
- def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
-
- Repeat elements of a tensor.
-
- .. warning::
-
- This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
-
- Args:
- input (Tensor): the input tensor.
- repeats (Tensor or int): The number of repetitions for each element.
- repeats is broadcasted to fit the shape of the given axis.
- dim (int, optional): The dimension along which to repeat values.
- By default, use the flattened input array, and return a flat output
- array.
-
- Keyword args:
- output_size (int, optional): Total output size for the given axis
- (e.g. the sum of repeats). If given, it avoids the stream synchronization
- needed to calculate the output shape of the tensor.
-
- Returns:
- Tensor: Repeated tensor which has the same shape as input, except along the given axis.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3])
- >>> x.repeat_interleave(2)
- tensor([1, 1, 2, 2, 3, 3])
- >>> y = torch.tensor([[1, 2], [3, 4]])
- >>> torch.repeat_interleave(y, 2)
- tensor([1, 1, 2, 2, 3, 3, 4, 4])
- >>> torch.repeat_interleave(y, 3, dim=1)
- tensor([[1, 1, 1, 2, 2, 2],
- [3, 3, 3, 4, 4, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
-
- If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
- `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
- `1` appears `n2` times, `2` appears `n3` times, etc.
-
- .. function:: repeat_interleave(repeats, *) -> Tensor
- :noindex:
-
- Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
-
- Args:
- repeats (Tensor): The number of repetitions for each element.
-
- Returns:
- Tensor: Repeated tensor of size `sum(repeats)`.
-
- Example::
-
- >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
- tensor([0, 1, 1, 2, 2, 2])
- """
- ...
- @overload
- def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor:
- r"""
- repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
-
- Repeat elements of a tensor.
-
- .. warning::
-
- This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
-
- Args:
- input (Tensor): the input tensor.
- repeats (Tensor or int): The number of repetitions for each element.
- repeats is broadcasted to fit the shape of the given axis.
- dim (int, optional): The dimension along which to repeat values.
- By default, use the flattened input array, and return a flat output
- array.
-
- Keyword args:
- output_size (int, optional): Total output size for the given axis
- (e.g. the sum of repeats). If given, it avoids the stream synchronization
- needed to calculate the output shape of the tensor.
-
- Returns:
- Tensor: Repeated tensor which has the same shape as input, except along the given axis.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3])
- >>> x.repeat_interleave(2)
- tensor([1, 1, 2, 2, 3, 3])
- >>> y = torch.tensor([[1, 2], [3, 4]])
- >>> torch.repeat_interleave(y, 2)
- tensor([1, 1, 2, 2, 3, 3, 4, 4])
- >>> torch.repeat_interleave(y, 3, dim=1)
- tensor([[1, 1, 1, 2, 2, 2],
- [3, 3, 3, 4, 4, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
- >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
- tensor([[1, 2],
- [3, 4],
- [3, 4]])
-
- If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
- `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
- `1` appears `n2` times, `2` appears `n3` times, etc.
-
- .. function:: repeat_interleave(repeats, *) -> Tensor
- :noindex:
-
- Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc.
-
- Args:
- repeats (Tensor): The number of repetitions for each element.
-
- Returns:
- Tensor: Repeated tensor of size `sum(repeats)`.
-
- Example::
-
- >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
- tensor([0, 1, 1, 2, 2, 2])
- """
- ...
- def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- reshape(input, shape) -> Tensor
-
- Returns a tensor with the same data and number of elements as :attr:`input`,
- but with the specified shape. When possible, the returned tensor will be a view
- of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
- with compatible strides can be reshaped without copying, but you should not
- depend on the copying vs. viewing behavior.
-
- See :meth:`torch.Tensor.view` on when it is possible to return a view.
-
- A single dimension may be -1, in which case it's inferred from the remaining
- dimensions and the number of elements in :attr:`input`.
-
- Args:
- input (Tensor): the tensor to be reshaped
- shape (tuple of int): the new shape
-
- Example::
-
- >>> a = torch.arange(4.)
- >>> torch.reshape(a, (2, 2))
- tensor([[ 0., 1.],
- [ 2., 3.]])
- >>> b = torch.tensor([[0, 1], [2, 3]])
- >>> torch.reshape(b, (-1,))
- tensor([ 0, 1, 2, 3])
- """
- ...
- def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ...
- def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ...
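- # resize_as_ is undocumented here. Assuming the top-level wrapper mirrors
- # :meth:`Tensor.resize_as_`, it resizes ``input`` to the shape of
- # ``the_template``; any newly exposed elements are uninitialized. A sketch:
- #
- #     >>> a = torch.zeros(2, 2)
- #     >>> b = torch.empty(3, 4)
- #     >>> torch.resize_as_(a, b).shape
- #     torch.Size([3, 4])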
- def resolve_conj(input: Tensor) -> Tensor:
- r"""
- resolve_conj(input) -> Tensor
-
- Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
- else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
- >>> y = x.conj()
- >>> y.is_conj()
- True
- >>> z = y.resolve_conj()
- >>> z
- tensor([-1 - 1j, -2 - 2j, 3 + 3j])
- >>> z.is_conj()
- False
- """
- ...
- def resolve_neg(input: Tensor) -> Tensor:
- r"""
- resolve_neg(input) -> Tensor
-
- Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
- else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
- >>> y = x.conj()
- >>> z = y.imag
- >>> z.is_neg()
- True
- >>> out = z.resolve_neg()
- >>> out
- tensor([-1., -2., 3.])
- >>> out.is_neg()
- False
- """
- ...
- @overload
- def result_type(tensor: Tensor, other: Tensor) -> _dtype:
- r"""
- result_type(tensor1, tensor2) -> dtype
-
- Returns the :class:`torch.dtype` that would result from performing an arithmetic
- operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
- for more information on the type promotion logic.
-
- Args:
- tensor1 (Tensor or Number): an input tensor or number
- tensor2 (Tensor or Number): an input tensor or number
-
- Example::
-
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
- torch.float32
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
- torch.uint8
- """
- ...
- @overload
- def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype:
- r"""
- result_type(tensor1, tensor2) -> dtype
-
- Returns the :class:`torch.dtype` that would result from performing an arithmetic
- operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
- for more information on the type promotion logic.
-
- Args:
- tensor1 (Tensor or Number): an input tensor or number
- tensor2 (Tensor or Number): an input tensor or number
-
- Example::
-
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
- torch.float32
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
- torch.uint8
- """
- ...
- @overload
- def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype:
- r"""
- result_type(tensor1, tensor2) -> dtype
-
- Returns the :class:`torch.dtype` that would result from performing an arithmetic
- operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
- for more information on the type promotion logic.
-
- Args:
- tensor1 (Tensor or Number): an input tensor or number
- tensor2 (Tensor or Number): an input tensor or number
-
- Example::
-
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
- torch.float32
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
- torch.uint8
- """
- ...
- @overload
- def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype:
- r"""
- result_type(tensor1, tensor2) -> dtype
-
- Returns the :class:`torch.dtype` that would result from performing an arithmetic
- operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
- for more information on the type promotion logic.
-
- Args:
- tensor1 (Tensor or Number): an input tensor or number
- tensor2 (Tensor or Number): an input tensor or number
-
- Example::
-
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
- torch.float32
- >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
- torch.uint8
- """
- ...
- def rms_norm(input: Tensor, normalized_shape: _size, weight: Optional[Tensor] = None, eps: Optional[_float] = None) -> Tensor: ...
- @overload
- def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
- @overload
- def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ...
- @overload
- def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
- def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ...
- def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor:
- r"""
- roll(input, shifts, dims=None) -> Tensor
-
- Roll the tensor :attr:`input` along the given dimension(s). Elements that are
- shifted beyond the last position are re-introduced at the first position. If
- :attr:`dims` is `None`, the tensor will be flattened before rolling and then
- restored to the original shape.
-
- Args:
- input (Tensor): the input tensor.
- shifts (int or tuple of ints): The number of places by which the elements
- of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
- the same size, and each dimension will be rolled by the corresponding
- value.
- dims (int or tuple of ints): Axis along which to roll.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
- >>> x
- tensor([[1, 2],
- [3, 4],
- [5, 6],
- [7, 8]])
- >>> torch.roll(x, 1)
- tensor([[8, 1],
- [2, 3],
- [4, 5],
- [6, 7]])
- >>> torch.roll(x, 1, 0)
- tensor([[7, 8],
- [1, 2],
- [3, 4],
- [5, 6]])
- >>> torch.roll(x, -1, 0)
- tensor([[3, 4],
- [5, 6],
- [7, 8],
- [1, 2]])
- >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
- tensor([[6, 5],
- [8, 7],
- [2, 1],
- [4, 3]])
- """
- ...
- def rot90(input: Tensor, k: _int = 1, dims: _size = (0,1)) -> Tensor:
- r"""
- rot90(input, k=1, dims=[0,1]) -> Tensor
-
- Rotates an n-D tensor by 90 degrees in the plane specified by the :attr:`dims` axes.
- The rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
-
- Args:
- input (Tensor): the input tensor.
- k (int): number of times to rotate. Default value is 1
- dims (a list or tuple): the two axes that define the plane of rotation. Default value is [0, 1]
-
- Example::
-
- >>> x = torch.arange(4).view(2, 2)
- >>> x
- tensor([[0, 1],
- [2, 3]])
- >>> torch.rot90(x, 1, [0, 1])
- tensor([[1, 3],
- [0, 2]])
-
- >>> x = torch.arange(8).view(2, 2, 2)
- >>> x
- tensor([[[0, 1],
- [2, 3]],
-
- [[4, 5],
- [6, 7]]])
- >>> torch.rot90(x, 1, [1, 2])
- tensor([[[1, 3],
- [0, 2]],
-
- [[5, 7],
- [4, 6]]])
- """
- ...
- @overload
- def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- round(input, *, decimals=0, out=None) -> Tensor
-
- Rounds elements of :attr:`input` to the nearest integer.
-
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
- The output tensor has the same dtype as :attr:`input`.
-
- .. note::
- This function implements the "round half to even" rule to
- break ties when a number is equidistant from two
- integers (e.g. `round(2.5)` is 2).
-
- When the :attr:`decimals` argument is specified, the
- algorithm used is similar to NumPy's `around`. This
- algorithm is fast but inexact and can easily
- overflow for low precision dtypes.
- E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
-
- .. seealso::
- :func:`torch.ceil`, which rounds up.
- :func:`torch.floor`, which rounds down.
- :func:`torch.trunc`, which rounds towards zero.
-
- Args:
- input (Tensor): the input tensor.
- decimals (int): Number of decimal places to round to (default: 0).
- If decimals is negative, it specifies the number of positions
- to the left of the decimal point.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
- tensor([ 5., -2., 9., -8.])
-
- >>> # Values equidistant from two integers are rounded towards
- >>> # the nearest even value (zero is treated as even)
- >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
- tensor([-0., 0., 2., 2.])
-
- >>> # A positive decimals argument rounds to that decimal place
- >>> torch.round(torch.tensor([0.1234567]), decimals=3)
- tensor([0.1230])
-
- >>> # A negative decimals argument rounds to the left of the decimal
- >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
- tensor([1000.])
- """
- ...
- @overload
- def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor:
- r"""
- round(input, *, decimals=0, out=None) -> Tensor
-
- Rounds elements of :attr:`input` to the nearest integer.
-
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
- The output tensor has the same dtype as :attr:`input`.
-
- .. note::
- This function implements the "round half to even" rule to
- break ties when a number is equidistant from two
- integers (e.g. `round(2.5)` is 2).
-
- When the :attr:`decimals` argument is specified, the
- algorithm used is similar to NumPy's `around`. This
- algorithm is fast but inexact and can easily
- overflow for low precision dtypes.
- E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
-
- .. seealso::
- :func:`torch.ceil`, which rounds up.
- :func:`torch.floor`, which rounds down.
- :func:`torch.trunc`, which rounds towards zero.
-
- Args:
- input (Tensor): the input tensor.
- decimals (int): Number of decimal places to round to (default: 0).
- If decimals is negative, it specifies the number of positions
- to the left of the decimal point.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
- tensor([ 5., -2., 9., -8.])
-
- >>> # Values equidistant from two integers are rounded towards
- >>> # the nearest even value (zero is treated as even)
- >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
- tensor([-0., 0., 2., 2.])
-
- >>> # A positive decimals argument rounds to that decimal place
- >>> torch.round(torch.tensor([0.1234567]), decimals=3)
- tensor([0.1230])
-
- >>> # A negative decimals argument rounds to the left of the decimal
- >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
- tensor([1000.])
- """
- ...
- @overload
- def round_(input: Tensor) -> Tensor: ...
- @overload
- def round_(input: Tensor, *, decimals: _int) -> Tensor: ...
- def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
- def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- row_stack(tensors, *, out=None) -> Tensor
-
- Alias of :func:`torch.vstack`.
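-
- A minimal usage sketch (illustrative; the output follows :func:`torch.vstack`
- semantics)::
-
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.row_stack((a, b))
- tensor([[1, 2, 3],
- [4, 5, 6]])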
- """
- ...
- def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
- def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ...
- def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- rsqrt(input, *, out=None) -> Tensor
-
- Returns a new tensor with the reciprocal of the square-root of each of
- the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.0370, 0.2970, 1.5420, -0.9105])
- >>> torch.rsqrt(a)
- tensor([ nan, 1.8351, 0.8053, nan])
- """
- ...
- def rsqrt_(input: Tensor) -> Tensor: ...
- @overload
- def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ...
- @overload
- def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ...
- def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ...
- def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor:
- r"""
- scatter(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_`
- """
- ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- scatter(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_`
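-
- A small illustrative example (a sketch: with ``dim=0``, the expected output
- follows the :meth:`torch.Tensor.scatter_` semantics,
- ``out[index[i][j]][j] = src[i][j]``)::
-
- >>> src = torch.arange(1., 11.).reshape((2, 5))
- >>> index = torch.tensor([[0, 1, 2, 0]])
- >>> torch.scatter(torch.zeros(3, 5), 0, index, src)
- tensor([[1., 0., 0., 4., 0.],
- [0., 2., 0., 0., 0.],
- [0., 0., 3., 0., 0.]])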
- """
- ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor:
- r"""
- scatter(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_`
- """
- ...
- @overload
- def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
- r"""
- scatter(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_`
- """
- ...
- @overload
- def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- scatter(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_`
- """
- ...
- @overload
- def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor:
- r"""
- scatter(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_`
- """
- ...
- @overload
- def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- scatter_add(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_add_`
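-
- A small illustrative example (a sketch: with ``dim=0``, each ``src[i][j]`` is
- accumulated into ``out[index[i][j]][j]``)::
-
- >>> src = torch.ones((2, 5))
- >>> index = torch.tensor([[0, 1, 2, 0, 0]])
- >>> torch.scatter_add(torch.zeros(3, 5), 0, index, src)
- tensor([[1., 0., 0., 1., 1.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.]])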
- """
- ...
- @overload
- def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor:
- r"""
- scatter_add(input, dim, index, src) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_add_`
- """
- ...
- def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
- r"""
- scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
-
- Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
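-
- A small illustrative example (a sketch: ``src`` values are reduced into
- :attr:`input` at the positions given by ``index``; with the default
- ``include_self=True`` the original :attr:`input` values take part in the
- reduction)::
-
- >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
- >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
- >>> input = torch.tensor([1., 2., 3., 4.])
- >>> torch.scatter_reduce(input, 0, index, src, reduce="sum")
- tensor([ 5., 14., 8., 4.])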
- """
- ...
- @overload
- def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
-
- Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
- corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
- of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
- Return a new tensor with the same size as :attr:`values`. More formally,
- the returned index satisfies the following rules:
-
- .. list-table::
- :widths: 12 10 78
- :header-rows: 1
-
- * - :attr:`sorted_sequence`
- - :attr:`right`
- - *returned index satisfies*
- * - 1-D
- - False
- - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
- * - 1-D
- - True
- - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
- * - N-D
- - False
- - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
- * - N-D
- - True
- - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
-
- Args:
- sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
- dimension unless :attr:`sorter` is provided, in which case the sequence does not
- need to be sorted
- values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
-
- Keyword args:
- out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
- Default value is False, i.e. default output data type is torch.int64.
- right (bool, optional): if False, return the first suitable location that is found. If True, return the
- last such index. If no suitable index is found, return 0 for a non-numerical value
- (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
- (one past the last index of the *innermost* dimension). In other words, if False,
- gets the lower bound index for each value in :attr:`values` on the corresponding
- *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
- bound index instead. Default value is False. :attr:`side` does the same and is
- preferred. It will error if :attr:`side` is set to "left" while this is True.
- side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
- and "right" corresponds to True for :attr:`right`. It will error if this is set to
- "left" while :attr:`right` is True. Default value is None.
- out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
- sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
- :attr:`sorted_sequence` containing a sequence of indices that sort it in the
- ascending order on the innermost dimension
-
-
- Example::
-
- >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
- >>> sorted_sequence
- tensor([[ 1, 3, 5, 7, 9],
- [ 2, 4, 6, 8, 10]])
- >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
- >>> values
- tensor([[3, 6, 9],
- [3, 6, 9]])
- >>> torch.searchsorted(sorted_sequence, values)
- tensor([[1, 3, 4],
- [1, 2, 4]])
- >>> torch.searchsorted(sorted_sequence, values, side='right')
- tensor([[2, 3, 5],
- [1, 3, 4]])
-
- >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
- >>> sorted_sequence_1d
- tensor([1, 3, 5, 7, 9])
- >>> torch.searchsorted(sorted_sequence_1d, values)
- tensor([[1, 3, 4],
- [1, 3, 4]])
- """
- ...
- @overload
- def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
-
- Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
- corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
- of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
- Return a new tensor with the same size as :attr:`values`. More formally,
- the returned index satisfies the following rules:
-
- .. list-table::
- :widths: 12 10 78
- :header-rows: 1
-
- * - :attr:`sorted_sequence`
- - :attr:`right`
- - *returned index satisfies*
- * - 1-D
- - False
- - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
- * - 1-D
- - True
- - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
- * - N-D
- - False
- - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
- * - N-D
- - True
- - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
-
- Args:
- sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
- dimension unless :attr:`sorter` is provided, in which case the sequence does not
- need to be sorted
- values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
-
- Keyword args:
- out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
- Default value is False, i.e. default output data type is torch.int64.
- right (bool, optional): if False, return the first suitable location that is found. If True, return the
- last such index. If no suitable index is found, return 0 for a non-numerical value
- (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
- (one past the last index of the *innermost* dimension). In other words, if False,
- gets the lower bound index for each value in :attr:`values` on the corresponding
- *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
- bound index instead. Default value is False. :attr:`side` does the same and is
- preferred. It will error if :attr:`side` is set to "left" while this is True.
- side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
- and "right" corresponds to True for :attr:`right`. It will error if this is set to
- "left" while :attr:`right` is True. Default value is None.
- out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
- sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
- :attr:`sorted_sequence` containing a sequence of indices that sort it in the
- ascending order on the innermost dimension
-
-
- Example::
-
- >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
- >>> sorted_sequence
- tensor([[ 1, 3, 5, 7, 9],
- [ 2, 4, 6, 8, 10]])
- >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
- >>> values
- tensor([[3, 6, 9],
- [3, 6, 9]])
- >>> torch.searchsorted(sorted_sequence, values)
- tensor([[1, 3, 4],
- [1, 2, 4]])
- >>> torch.searchsorted(sorted_sequence, values, side='right')
- tensor([[2, 3, 5],
- [1, 3, 4]])
-
- >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
- >>> sorted_sequence_1d
- tensor([1, 3, 5, 7, 9])
- >>> torch.searchsorted(sorted_sequence_1d, values)
- tensor([[1, 3, 4],
- [1, 3, 4]])
- """
- ...
- def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ...
- @overload
- def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
- r"""
- select(input, dim, index) -> Tensor
-
- Slices the :attr:`input` tensor along the selected dimension at the given index.
- This function returns a view of the original tensor with the given dimension removed.
-
- .. note:: If :attr:`input` is a sparse tensor and returning a view of
- the tensor is not possible, a RuntimeError exception is
- raised. If this is the case, consider using the
- :func:`torch.select_copy` function.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to slice
- index (int): the index to select with
-
- .. note::
-
- :meth:`select` is equivalent to slicing. For example,
- ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
- ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
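-
- A brief example (illustrative; the outputs follow from the indexing
- equivalence noted above)::
-
- >>> x = torch.arange(6).reshape(2, 3)
- >>> torch.select(x, 0, 1)
- tensor([3, 4, 5])
- >>> torch.select(x, 1, 2)
- tensor([2, 5])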
- """
- ...
- @overload
- def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor:
- r"""
- select(input, dim, index) -> Tensor
-
- Slices the :attr:`input` tensor along the selected dimension at the given index.
- This function returns a view of the original tensor with the given dimension removed.
-
- .. note:: If :attr:`input` is a sparse tensor and returning a view of
- the tensor is not possible, a RuntimeError exception is
- raised. If this is the case, consider using the
- :func:`torch.select_copy` function.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the dimension to slice
- index (int): the index to select with
-
- .. note::
-
- :meth:`select` is equivalent to slicing. For example,
- ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
- ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
- """
- ...
- def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.select`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor:
- r"""
- select_scatter(input, src, dim, index) -> Tensor
-
- Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
- This function returns a tensor with fresh storage; it does not create a view.
-
-
- Args:
- input (Tensor): the input tensor.
- src (Tensor): The tensor to embed into :attr:`input`
- dim (int): the dimension to insert the slice into.
- index (int): the index to select with
-
- .. note::
-
- :attr:`src` must be of the proper size in order to be embedded
- into :attr:`input`. Specifically, it should have the same shape as
- ``torch.select(input, dim, index)``
-
- Example::
-
- >>> a = torch.zeros(2, 2)
- >>> b = torch.ones(2)
- >>> a.select_scatter(b, 0, 0)
- tensor([[1., 1.],
- [0., 0.]])
- """
- ...
- def selu(input: Tensor) -> Tensor: ...
- def selu_(input: Tensor) -> Tensor: ...
- def set_flush_denormal(mode: _bool) -> _bool:
- r"""
- set_flush_denormal(mode) -> bool
-
- Controls whether denormal floating-point numbers are flushed to zero on CPU.
-
- Returns ``True`` if your system supports flushing denormal numbers and the
- flush denormal mode was successfully configured. :meth:`~torch.set_flush_denormal`
- is supported on x86 architectures that support SSE3, and on AArch64.
-
- Args:
- mode (bool): Controls whether to enable flush denormal mode or not
-
- Example::
-
- >>> torch.set_flush_denormal(True)
- True
- >>> torch.tensor([1e-323], dtype=torch.float64)
- tensor([ 0.], dtype=torch.float64)
- >>> torch.set_flush_denormal(False)
- True
- >>> torch.tensor([1e-323], dtype=torch.float64)
- tensor(9.88131e-324 *
- [ 1.0000], dtype=torch.float64)
- """
- ...
- def set_num_interop_threads(num: _int) -> None:
- r"""
- set_num_interop_threads(int)
-
- Sets the number of threads used for interop parallelism
- (e.g. in JIT interpreter) on CPU.
-
- .. warning::
- Can only be called once and before any inter-op parallel work
- is started (e.g. JIT execution).
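-
- A usage sketch (illustrative; pairs the setter with
- :func:`torch.get_num_interop_threads` to confirm the value)::
-
- >>> torch.set_num_interop_threads(4) # call once, before any inter-op work
- >>> torch.get_num_interop_threads()
- 4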
- """
- ...
- def set_num_threads(num: _int) -> None:
- r"""
- set_num_threads(int)
-
- Sets the number of threads used for intraop parallelism on CPU.
-
- .. warning::
- To ensure that the correct number of threads is used, set_num_threads
- must be called before running eager, JIT or autograd code.
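-
- A usage sketch (illustrative; pairs the setter with
- :func:`torch.get_num_threads` to confirm the value)::
-
- >>> torch.set_num_threads(4) # set before running eager, JIT or autograd code
- >>> torch.get_num_threads()
- 4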
- """
- ...
- def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sgn(input, *, out=None) -> Tensor
-
- This function is an extension of :func:`torch.sign` to complex tensors.
- For complex tensors, it computes a new tensor whose elements have the
- same angles as the corresponding elements of :attr:`input` and absolute
- values (i.e. magnitudes) of one; for non-complex tensors, it is
- equivalent to :func:`torch.sign`.
-
- .. math::
- \text{out}_{i} = \begin{cases}
- 0 & |\text{input}_i| = 0 \\
- \frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
- \end{cases}
-
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
- >>> t.sgn()
- tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
- """
- ...
- def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sigmoid(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.expit`.
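-
- A minimal illustrative example (expected values: sigmoid(0) = 0.5 and
- sigmoid(2) ≈ 0.8808)::
-
- >>> torch.sigmoid(torch.tensor([0.0, 2.0]))
- tensor([0.5000, 0.8808])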
- """
- ...
- def sigmoid_(input: Tensor) -> Tensor: ...
- def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sign(input, *, out=None) -> Tensor
-
- Returns a new tensor with the signs of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
- >>> a
- tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
- >>> torch.sign(a)
- tensor([ 1., -1., 0., 1.])
- """
- ...
- def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- signbit(input, *, out=None) -> Tensor
-
- Tests if each element of :attr:`input` has its sign bit set or not.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
- >>> torch.signbit(a)
- tensor([ False, True, False, False])
- >>> a = torch.tensor([-0.0, 0.0])
- >>> torch.signbit(a)
- tensor([ True, False])
-
- .. note::
- signbit handles signed zeros, so negative zero (-0) returns True.
- """
- ...
- def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sin(input, *, out=None) -> Tensor
-
- Returns a new tensor with the sine of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \sin(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-0.5461, 0.1347, -2.7266, -0.2746])
- >>> torch.sin(a)
- tensor([-0.5194, 0.1343, -0.4032, -0.2711])
- """
- ...
- def sin_(input: Tensor) -> Tensor: ...
- def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sinc(input, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.sinc`.
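-
- A minimal illustrative example (normalized sinc: sin(pi x) / (pi x), with
- sinc(0) defined as 1 and sinc(0.5) = 2 / pi ≈ 0.6366)::
-
- >>> torch.sinc(torch.tensor([0.0, 0.5]))
- tensor([1.0000, 0.6366])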
- """
- ...
- def sinc_(input: Tensor) -> Tensor: ...
- def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sinh(input, *, out=None) -> Tensor
-
- Returns a new tensor with the hyperbolic sine of the elements of
- :attr:`input`.
-
- .. math::
- \text{out}_{i} = \sinh(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
- >>> torch.sinh(a)
- tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
-
- .. note::
- When :attr:`input` is on the CPU, the implementation of torch.sinh may use
- the Sleef library, which rounds very large results to infinity or negative
- infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
- """
- ...
- def sinh_(input: Tensor) -> Tensor: ...
- def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.slice`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def slice_inverse(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ...
- def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
-
- Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
- dimension.
- This function returns a tensor with fresh storage; it does not create a view.
-
-
- Args:
- input (Tensor): the input tensor.
- src (Tensor): The tensor to embed into :attr:`input`
- dim (int): the dimension to insert the slice into
- start (Optional[int]): the start index of where to insert the slice
- end (Optional[int]): the end index of where to insert the slice
- step (int): the step between elements of the slice
-
- Example::
-
- >>> a = torch.zeros(8, 8)
- >>> b = torch.ones(2, 8)
- >>> a.slice_scatter(b, start=6)
- tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0., 0., 0., 0.],
- [1., 1., 1., 1., 1., 1., 1., 1.],
- [1., 1., 1., 1., 1., 1., 1., 1.]])
-
- >>> b = torch.ones(8, 2)
- >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
- tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 1., 0., 0., 0.]])
- """
- ...
- def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet:
- r"""
- slogdet(input) -> (Tensor, Tensor)
-
- Alias for :func:`torch.linalg.slogdet`
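-
- A small illustrative example (sign and log of the absolute determinant of a
- diagonal matrix; det = 6, so logabsdet = log(6) ≈ 1.7918)::
-
- >>> A = torch.tensor([[2., 0.], [0., 3.]])
- >>> sign, logabsdet = torch.slogdet(A)
- >>> sign, logabsdet
- (tensor(1.), tensor(1.7918))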
- """
- ...
- def smm(input: Tensor, mat2: Tensor) -> Tensor:
- r"""
- smm(input, mat) -> Tensor
-
- Performs a matrix multiplication of the sparse matrix :attr:`input`
- with the dense matrix :attr:`mat`.
-
- Args:
- input (Tensor): a sparse matrix to be matrix multiplied
- mat (Tensor): a dense matrix to be matrix multiplied
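-
- A usage sketch (illustrative; assumes a COO sparse :attr:`input` built with
- :meth:`Tensor.to_sparse`)::
-
- >>> sp = torch.eye(2).to_sparse()
- >>> dense = torch.tensor([[1., 2.], [3., 4.]])
- >>> out = torch.smm(sp, dense) # identity @ dense == dense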
- """
- ...
- @overload
- def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- softmax(input, dim, *, dtype=None) -> Tensor
-
- Alias for :func:`torch.nn.functional.softmax`.
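-
- A minimal illustrative example (the entries are exponentiated and normalized
- to sum to 1 along :attr:`dim`)::
-
- >>> t = torch.tensor([1.0, 2.0, 3.0])
- >>> torch.softmax(t, dim=0)
- tensor([0.0900, 0.2447, 0.6652])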
- """
- ...
- @overload
- def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- softmax(input, dim, *, dtype=None) -> Tensor
-
- Alias for :func:`torch.nn.functional.softmax`.
- """
- ...
- @overload
- def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
- r"""
- sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
-
- Sorts the elements of the :attr:`input` tensor along a given dimension
- in ascending order by value.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`descending` is ``True`` then the elements are sorted in descending
- order by value.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements.
-
- A namedtuple of (values, indices) is returned, where the `values` are the
- sorted values and `indices` are the indices of the elements in the original
- `input` tensor.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): makes the sorting routine stable, which guarantees that the order
- of equivalent elements is preserved.
-
- Keyword args:
- out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
- be optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> sorted, indices = torch.sort(x)
- >>> sorted
- tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
- [-0.5793, 0.0061, 0.6058, 0.9497],
- [-0.5071, 0.3343, 0.9553, 1.0960]])
- >>> indices
- tensor([[ 1, 0, 2, 3],
- [ 3, 1, 0, 2],
- [ 0, 3, 1, 2]])
-
- >>> sorted, indices = torch.sort(x, 0)
- >>> sorted
- tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
- [ 0.0608, 0.0061, 0.9497, 0.3343],
- [ 0.6058, 0.9553, 1.0960, 2.3332]])
- >>> indices
- tensor([[ 2, 0, 0, 1],
- [ 0, 1, 1, 2],
- [ 1, 2, 2, 0]])
- >>> x = torch.tensor([0, 1] * 9)
- >>> x.sort()
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
- >>> x.sort(stable=True)
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
- """
- ...
- @overload
- def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
- r"""
- sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
-
- Sorts the elements of the :attr:`input` tensor along a given dimension
- in ascending order by value.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`descending` is ``True`` then the elements are sorted in descending
- order by value.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements.
-
- A namedtuple of (values, indices) is returned, where the `values` are the
- sorted values and `indices` are the indices of the elements in the original
- `input` tensor.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): makes the sorting routine stable, which guarantees that the order
- of equivalent elements is preserved.
-
- Keyword args:
- out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
- be optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> sorted, indices = torch.sort(x)
- >>> sorted
- tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
- [-0.5793, 0.0061, 0.6058, 0.9497],
- [-0.5071, 0.3343, 0.9553, 1.0960]])
- >>> indices
- tensor([[ 1, 0, 2, 3],
- [ 3, 1, 0, 2],
- [ 0, 3, 1, 2]])
-
- >>> sorted, indices = torch.sort(x, 0)
- >>> sorted
- tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
- [ 0.0608, 0.0061, 0.9497, 0.3343],
- [ 0.6058, 0.9553, 1.0960, 2.3332]])
- >>> indices
- tensor([[ 2, 0, 0, 1],
- [ 0, 1, 1, 2],
- [ 1, 2, 2, 0]])
- >>> x = torch.tensor([0, 1] * 9)
- >>> x.sort()
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
- >>> x.sort(stable=True)
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
- """
- ...
- @overload
- def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
- r"""
- sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
-
- Sorts the elements of the :attr:`input` tensor along a given dimension
- in ascending order by value.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`descending` is ``True`` then the elements are sorted in descending
- order by value.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements.
-
- A namedtuple of (values, indices) is returned, where the `values` are the
- sorted values and `indices` are the indices of the elements in the original
- `input` tensor.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): makes the sorting routine stable, which guarantees that the order
- of equivalent elements is preserved.
-
- Keyword args:
- out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
- be optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> sorted, indices = torch.sort(x)
- >>> sorted
- tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
- [-0.5793, 0.0061, 0.6058, 0.9497],
- [-0.5071, 0.3343, 0.9553, 1.0960]])
- >>> indices
- tensor([[ 1, 0, 2, 3],
- [ 3, 1, 0, 2],
- [ 0, 3, 1, 2]])
-
- >>> sorted, indices = torch.sort(x, 0)
- >>> sorted
- tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
- [ 0.0608, 0.0061, 0.9497, 0.3343],
- [ 0.6058, 0.9553, 1.0960, 2.3332]])
- >>> indices
- tensor([[ 2, 0, 0, 1],
- [ 0, 1, 1, 2],
- [ 1, 2, 2, 0]])
- >>> x = torch.tensor([0, 1] * 9)
- >>> x.sort()
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
- >>> x.sort(stable=True)
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
- """
- ...
- @overload
- def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort:
- r"""
- sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
-
- Sorts the elements of the :attr:`input` tensor along a given dimension
- in ascending order by value.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`descending` is ``True`` then the elements are sorted in descending
- order by value.
-
- If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
- the order of equivalent elements.
-
- A namedtuple of (values, indices) is returned, where the `values` are the
- sorted values and `indices` are the indices of the elements in the original
- `input` tensor.
-
- Args:
- input (Tensor): the input tensor.
- dim (int, optional): the dimension to sort along
- descending (bool, optional): controls the sorting order (ascending or descending)
- stable (bool, optional): makes the sorting routine stable, which guarantees that the order
- of equivalent elements is preserved.
-
- Keyword args:
- out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
- be optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.randn(3, 4)
- >>> sorted, indices = torch.sort(x)
- >>> sorted
- tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
- [-0.5793, 0.0061, 0.6058, 0.9497],
- [-0.5071, 0.3343, 0.9553, 1.0960]])
- >>> indices
- tensor([[ 1, 0, 2, 3],
- [ 3, 1, 0, 2],
- [ 0, 3, 1, 2]])
-
- >>> sorted, indices = torch.sort(x, 0)
- >>> sorted
- tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
- [ 0.0608, 0.0061, 0.9497, 0.3343],
- [ 0.6058, 0.9553, 1.0960, 2.3332]])
- >>> indices
- tensor([[ 2, 0, 0, 1],
- [ 0, 1, 1, 2],
- [ 1, 2, 2, 0]])
- >>> x = torch.tensor([0, 1] * 9)
- >>> x.sort()
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
- >>> x.sort(stable=True)
- torch.return_types.sort(
- values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
- indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
- """
- ...
- def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
- r"""
- sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
-
- Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
- Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
- given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
- multiplication operations in BSC format are typically faster than those
- for sparse tensors in COO format. Make sure you have a look at :ref:`the
- note on the data type of the indices <sparse-bsc-docs>`.
-
- .. note::
-
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor.
-
- Args:
- ccol_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, ncolblocks + 1)``. The last element of each
- batch is the number of non-zero blocks. This tensor encodes the
- block index in values and row_indices depending on where the
- given column block starts. Each successive number in the tensor
- subtracted by the number before it denotes the number of
- blocks in a given column.
- row_indices (array_like): Row block co-ordinates of each block in
- values. (B+1)-dimensional tensor with the same length
- as values.
- values (array_like): Initial blocks for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, and other types that
- represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
- number of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
- blocksize[1], *densesize)`` If not provided, the size will be
- inferred as the minimum size big enough to hold all non-zero
- blocks.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_device`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
-
- Example::
- >>> ccol_indices = [0, 1, 2]
- >>> row_indices = [0, 1]
- >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
- >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
- ... torch.tensor(row_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(ccol_indices=tensor([0, 1, 2]),
- row_indices=tensor([0, 1]),
- values=tensor([[[1., 2.],
- [3., 4.]],
- [[5., 6.],
- [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
- layout=torch.sparse_bsc)
- """
- ...
- def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
- r"""
- sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
-
- Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
- <sparse-bsr-docs>` with specified 2-dimensional blocks at the given
- :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
- multiplication operations in BSR format are typically faster than those
- for sparse tensors in COO format. Make sure you have a look at :ref:`the
- note on the data type of the indices <sparse-bsr-docs>`.
-
- .. note::
-
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor.
-
- Args:
- crow_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, nrowblocks + 1)``. The last element of each
- batch is the number of non-zeros. This tensor encodes the
- block index in values and col_indices depending on where the
- given row block starts. Each successive number in the tensor
- subtracted by the number before it denotes the number of
- blocks in a given row.
- col_indices (array_like): Column block co-ordinates of each block
- in values. (B+1)-dimensional tensor with the same length as
- values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, and other types that
- represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
- number of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
- blocksize[1], *densesize)`` where ``blocksize ==
- values.shape[1:3]``. If not provided, the size will be
- inferred as the minimum size big enough to hold all non-zero
- blocks.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_device`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
-
- Example::
- >>> crow_indices = [0, 1, 2]
- >>> col_indices = [0, 1]
- >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
- >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
- ... torch.tensor(col_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(crow_indices=tensor([0, 1, 2]),
- col_indices=tensor([0, 1]),
- values=tensor([[[1., 2.],
- [3., 4.]],
- [[5., 6.],
- [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
- layout=torch.sparse_bsr)
- """
- ...
- def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
- r"""
- sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, *, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
-
- Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
- CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
- the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
- matrix multiplication operations in Compressed Sparse format are
- typically faster than those for sparse tensors in COO format. Make sure
- you have a look at :ref:`the note on the data type of the indices
- <sparse-compressed-docs>`.
-
- .. note::
-
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor.
-
- Args:
- compressed_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, compressed_dim_size + 1)``. The last element of
- each batch is the number of non-zero elements or blocks. This
- tensor encodes the index in ``values`` and ``plain_indices``
- depending on where the given compressed dimension (row or
- column) starts. Each successive number in the tensor
- subtracted by the number before it denotes the number of
- elements or blocks in a given compressed dimension.
- plain_indices (array_like): Plain dimension (column or row)
- co-ordinates of each element or block in values. (B+1)-dimensional
- tensor with the same length as values.
-
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, and other types that
- represent a (1+K)-dimensional (for CSR and CSC layouts) or
- (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
- ``K`` is the number of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
- blocksize[1], *densesize)`` where ``blocksize[0] ==
- blocksize[1] == 1`` for CSR and CSC formats. If not provided,
- the size will be inferred as the minimum size big enough to
- hold all non-zero elements or blocks.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- layout (:class:`torch.layout`, required): the desired layout of
- returned tensor: :attr:`torch.sparse_csr`,
- :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
- :attr:`torch.sparse_bsc`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_device`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
-
- Example::
- >>> compressed_indices = [0, 2, 4]
- >>> plain_indices = [0, 1, 0, 1]
- >>> values = [1, 2, 3, 4]
- >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
- ... torch.tensor(plain_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
- tensor(crow_indices=tensor([0, 2, 4]),
- col_indices=tensor([0, 1, 0, 1]),
- values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
- dtype=torch.float64, layout=torch.sparse_csr)
- """
- ...
- def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor:
- r"""
- sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
-
- Constructs a :ref:`sparse tensor in COO(rdinate) format
- <sparse-coo-docs>` with specified values at the given
- :attr:`indices`.
-
- .. note::
-
- This function returns an :ref:`uncoalesced tensor
- <sparse-uncoalesced-coo-docs>` when :attr:`is_coalesced` is
- unspecified or ``None``.
-
- .. note::
-
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor.
-
- Args:
- indices (array_like): Initial data for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
- internally. The indices are the coordinates of the non-zero values in the matrix, and thus
- should be two-dimensional where the first dimension is the number of tensor dimensions and
- the second dimension is the number of non-zero values.
- values (array_like): Initial values for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types.
- size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
- provided the size will be inferred as the minimum size big enough to hold all non-zero
- elements.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if None, infers data type from :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if None, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
- is_coalesced (bool, optional): When ``True``, the caller is
- responsible for providing tensor indices that correspond to a
- coalesced tensor. If the :attr:`check_invariants` flag is
- False, no error will be raised if the prerequisites are not
- met and this will lead to silently incorrect results. To force
- coalescence, please use :meth:`coalesce` on the resulting
- Tensor.
- Default: ``None``; except for trivial cases (e.g. nnz < 2) the
- resulting Tensor has is_coalesced set to ``False``.
-
- Example::
-
- >>> i = torch.tensor([[0, 1, 1],
- ... [2, 0, 2]])
- >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
- >>> torch.sparse_coo_tensor(i, v, [2, 4])
- tensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- size=(2, 4), nnz=3, layout=torch.sparse_coo)
-
- >>> torch.sparse_coo_tensor(i, v) # Shape inference
- tensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- size=(2, 3), nnz=3, layout=torch.sparse_coo)
-
- >>> torch.sparse_coo_tensor(i, v, [2, 4],
- ... dtype=torch.float64,
- ... device=torch.device('cuda:0'))
- tensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
- layout=torch.sparse_coo)
-
- # Create an empty sparse tensor with the following invariants:
- # 1. sparse_dim + dense_dim = len(SparseTensor.shape)
- # 2. SparseTensor._indices().shape = (sparse_dim, nnz)
- # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
- #
- # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
- # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
- >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
- tensor(indices=tensor([], size=(1, 0)),
- values=tensor([], size=(0,)),
- size=(1,), nnz=0, layout=torch.sparse_coo)
-
- # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
- # sparse_dim = 1
- >>> torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
- tensor(indices=tensor([], size=(1, 0)),
- values=tensor([], size=(0, 2)),
- size=(1, 2), nnz=0, layout=torch.sparse_coo)
-
- .. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
- """
- ...
- def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
- r"""
- sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
-
- Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
- <sparse-csc-docs>` with specified values at the given
- :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
- multiplication operations in CSC format are typically faster than those
- for sparse tensors in COO format. Make sure you have a look at :ref:`the
- note on the data type of the indices <sparse-csc-docs>`.
-
- .. note::
-
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor.
-
- Args:
- ccol_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, ncols + 1)``. The last element of each batch
- is the number of non-zeros. This tensor encodes the index in
- ``values`` and ``row_indices`` at which the given column
- starts. The difference between each successive number and the
- one before it is the number of elements in the given
- column.
- row_indices (array_like): Row coordinates of each element in
- values. (B+1)-dimensional tensor with the same length as
- values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, or other types that
- represent a (1+K)-dimensional tensor where ``K`` is the number
- of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
- not provided, the size will be inferred as the minimum size
- big enough to hold all non-zero elements.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_device`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
-
- Example::
-
- >>> ccol_indices = [0, 2, 4]
- >>> row_indices = [0, 1, 0, 1]
- >>> values = [1, 2, 3, 4]
- >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
- ... torch.tensor(row_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(ccol_indices=tensor([0, 2, 4]),
- row_indices=tensor([0, 1, 0, 1]),
- values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
- dtype=torch.float64, layout=torch.sparse_csc)
- """
- ...
- def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor:
- r"""
- sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor
-
- Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
- values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
- in CSR format are typically faster than those for sparse tensors in COO format. Make sure you have a look
- at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
-
- .. note::
-
- If the ``device`` argument is not specified the device of the given
- :attr:`values` and indices tensor(s) must match. If, however, the
- argument is specified the input Tensors will be converted to the
- given device and in turn determine the device of the constructed
- sparse tensor.
-
- Args:
- crow_indices (array_like): (B+1)-dimensional array of size
- ``(*batchsize, nrows + 1)``. The last element of each batch
- is the number of non-zeros. This tensor encodes the index in
- ``values`` and ``col_indices`` at which the given row
- starts. The difference between each successive number and the
- one before it is the number of elements in the given
- row.
- col_indices (array_like): Column coordinates of each element in
- values. (B+1)-dimensional tensor with the same length
- as values.
- values (array_like): Initial values for the tensor. Can be a list,
- tuple, NumPy ``ndarray``, scalar, or other types that
- represent a (1+K)-dimensional tensor where ``K`` is the number
- of dense dimensions.
- size (list, tuple, :class:`torch.Size`, optional): Size of the
- sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
- not provided, the size will be inferred as the minimum size
- big enough to hold all non-zero elements.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of
- returned tensor. Default: if None, infers data type from
- :attr:`values`.
- device (:class:`torch.device`, optional): the desired device of
- returned tensor. Default: if None, uses the current device
- for the default tensor type (see
- :func:`torch.set_default_device`). :attr:`device` will be
- the CPU for CPU tensor types and the current CUDA device for
- CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- check_invariants (bool, optional): If sparse tensor invariants are checked.
- Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
- initially False.
-
- Example::
-
- >>> crow_indices = [0, 2, 4]
- >>> col_indices = [0, 1, 0, 1]
- >>> values = [1, 2, 3, 4]
- >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
- ... torch.tensor(col_indices, dtype=torch.int64),
- ... torch.tensor(values), dtype=torch.double)
- tensor(crow_indices=tensor([0, 2, 4]),
- col_indices=tensor([0, 1, 0, 1]),
- values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
- dtype=torch.float64, layout=torch.sparse_csr)
- """
- ...
- def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
- r"""
- Performs the same operation as :func:`torch.split`, but all output tensors
- are freshly created instead of aliasing the input.
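-
- For illustration, a minimal sketch of the non-aliasing behavior described
- above (outputs assumed from the :func:`torch.split` semantics):
-
- Example::
-
- >>> x = torch.arange(4)
- >>> a, b = torch.split_copy(x, 2)
- >>> a.fill_(0) # mutating a copy...
- tensor([0, 0])
- >>> x # ...leaves the input intact, unlike torch.split
- tensor([0, 1, 2, 3])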
- """
- ...
- def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
- def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
- r"""
- Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
- are freshly created instead of aliasing the input.
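-
- A minimal sketch (outputs assumed from the :func:`torch.split_with_sizes`
- semantics, with fresh copies rather than views):
-
- Example::
-
- >>> x = torch.arange(6)
- >>> parts = torch.split_with_sizes_copy(x, [1, 2, 3])
- >>> [p.tolist() for p in parts] # copies of [0], [1, 2], [3, 4, 5]
- [[0], [1, 2], [3, 4, 5]]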
- """
- ...
- def spmm(input: Tensor, mat2: Tensor) -> Tensor: ...
- def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sqrt(input, *, out=None) -> Tensor
-
- Returns a new tensor with the square-root of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \sqrt{\text{input}_{i}}
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-2.0755, 1.0226, 0.0831, 0.4806])
- >>> torch.sqrt(a)
- tensor([ nan, 1.0112, 0.2883, 0.6933])
- """
- ...
- def sqrt_(input: Tensor) -> Tensor: ...
- def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- square(input, *, out=None) -> Tensor
-
- Returns a new tensor with the square of the elements of :attr:`input`.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-2.0755, 1.0226, 0.0831, 0.4806])
- >>> torch.square(a)
- tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
- """
- ...
- def square_(input: Tensor) -> Tensor: ...
- @overload
- def squeeze(input: Tensor) -> Tensor:
- r"""
- squeeze(input, dim=None) -> Tensor
-
- Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
-
- For example, if `input` is of shape:
- :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
- will be of shape: :math:`(A \times B \times C \times D)`.
-
- When :attr:`dim` is given, a squeeze operation is done only in the given
- dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
- ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
- will squeeze the tensor to the shape :math:`(A \times B)`.
-
- .. note:: The returned tensor shares the storage with the input tensor,
- so changing the contents of one will change the contents of the other.
-
- .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
- will also remove the batch dimension, which can lead to unexpected
- errors. Consider specifying only the dims you wish to be squeezed.
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints, optional): if given, the input will be squeezed
- only in the specified dimensions.
-
- .. versionchanged:: 2.0
- :attr:`dim` now accepts tuples of dimensions.
-
- Example::
-
- >>> x = torch.zeros(2, 1, 2, 1, 2)
- >>> x.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x)
- >>> y.size()
- torch.Size([2, 2, 2])
- >>> y = torch.squeeze(x, 0)
- >>> y.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x, 1)
- >>> y.size()
- torch.Size([2, 2, 1, 2])
- >>> y = torch.squeeze(x, (1, 2, 3))
- >>> y.size()
- torch.Size([2, 2, 2])
- """
- ...
- @overload
- def squeeze(input: Tensor, dim: _int) -> Tensor:
- r"""
- squeeze(input, dim=None) -> Tensor
-
- Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
-
- For example, if `input` is of shape:
- :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
- will be of shape: :math:`(A \times B \times C \times D)`.
-
- When :attr:`dim` is given, a squeeze operation is done only in the given
- dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
- ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
- will squeeze the tensor to the shape :math:`(A \times B)`.
-
- .. note:: The returned tensor shares the storage with the input tensor,
- so changing the contents of one will change the contents of the other.
-
- .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
- will also remove the batch dimension, which can lead to unexpected
- errors. Consider specifying only the dims you wish to be squeezed.
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints, optional): if given, the input will be squeezed
- only in the specified dimensions.
-
- .. versionchanged:: 2.0
- :attr:`dim` now accepts tuples of dimensions.
-
- Example::
-
- >>> x = torch.zeros(2, 1, 2, 1, 2)
- >>> x.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x)
- >>> y.size()
- torch.Size([2, 2, 2])
- >>> y = torch.squeeze(x, 0)
- >>> y.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x, 1)
- >>> y.size()
- torch.Size([2, 2, 1, 2])
- >>> y = torch.squeeze(x, (1, 2, 3))
- >>> y.size()
- torch.Size([2, 2, 2])
- """
- ...
- @overload
- def squeeze(input: Tensor, dim: _size) -> Tensor:
- r"""
- squeeze(input, dim=None) -> Tensor
-
- Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
-
- For example, if `input` is of shape:
- :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
- will be of shape: :math:`(A \times B \times C \times D)`.
-
- When :attr:`dim` is given, a squeeze operation is done only in the given
- dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
- ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
- will squeeze the tensor to the shape :math:`(A \times B)`.
-
- .. note:: The returned tensor shares the storage with the input tensor,
- so changing the contents of one will change the contents of the other.
-
- .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
- will also remove the batch dimension, which can lead to unexpected
- errors. Consider specifying only the dims you wish to be squeezed.
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints, optional): if given, the input will be squeezed
- only in the specified dimensions.
-
- .. versionchanged:: 2.0
- :attr:`dim` now accepts tuples of dimensions.
-
- Example::
-
- >>> x = torch.zeros(2, 1, 2, 1, 2)
- >>> x.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x)
- >>> y.size()
- torch.Size([2, 2, 2])
- >>> y = torch.squeeze(x, 0)
- >>> y.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x, 1)
- >>> y.size()
- torch.Size([2, 2, 1, 2])
- >>> y = torch.squeeze(x, (1, 2, 3))
- >>> y.size()
- torch.Size([2, 2, 2])
- """
- ...
- @overload
- def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor:
- r"""
- squeeze(input, dim=None) -> Tensor
-
- Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
-
- For example, if `input` is of shape:
- :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()`
- will be of shape: :math:`(A \times B \times C \times D)`.
-
- When :attr:`dim` is given, a squeeze operation is done only in the given
- dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
- ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
- will squeeze the tensor to the shape :math:`(A \times B)`.
-
- .. note:: The returned tensor shares the storage with the input tensor,
- so changing the contents of one will change the contents of the other.
-
- .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
- will also remove the batch dimension, which can lead to unexpected
- errors. Consider specifying only the dims you wish to be squeezed.
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints, optional): if given, the input will be squeezed
- only in the specified dimensions.
-
- .. versionchanged:: 2.0
- :attr:`dim` now accepts tuples of dimensions.
-
- Example::
-
- >>> x = torch.zeros(2, 1, 2, 1, 2)
- >>> x.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x)
- >>> y.size()
- torch.Size([2, 2, 2])
- >>> y = torch.squeeze(x, 0)
- >>> y.size()
- torch.Size([2, 1, 2, 1, 2])
- >>> y = torch.squeeze(x, 1)
- >>> y.size()
- torch.Size([2, 2, 1, 2])
- >>> y = torch.squeeze(x, (1, 2, 3))
- >>> y.size()
- torch.Size([2, 2, 2])
- """
- ...
- @overload
- def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.squeeze`, but all output tensors
- are freshly created instead of aliasing the input.
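-
- A minimal sketch (outputs assumed from the :func:`torch.squeeze`
- semantics, with fresh storage rather than a view):
-
- Example::
-
- >>> x = torch.zeros(2, 1, 3)
- >>> y = torch.squeeze_copy(x)
- >>> y.size()
- torch.Size([2, 3])
- >>> y.data_ptr() == x.data_ptr() # freshly created, so no aliasing
- False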
- """
- ...
- @overload
- def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.squeeze`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.squeeze`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor:
- r"""
- sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
- :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
-
- Note: This function is equivalent to :func:`torch.addmm`, except
- :attr:`input` and :attr:`mat1` are sparse.
-
- Args:
- input (Tensor): a sparse matrix to be added
- mat1 (Tensor): a sparse matrix to be matrix multiplied
- mat2 (Tensor): a dense matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
- """
- ...
- @overload
- def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
- :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
-
- Note: This function is equivalent to :func:`torch.addmm`, except
- :attr:`input` and :attr:`mat1` are sparse.
-
- Args:
- input (Tensor): a sparse matrix to be added
- mat1 (Tensor): a sparse matrix to be matrix multiplied
- mat2 (Tensor): a dense matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
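-
- A small worked sketch (values chosen for illustration; the expected
- output follows from ``input + mat1 @ mat2``):
-
- Example::
-
- >>> inp = torch.sparse_coo_tensor(torch.tensor([[0], [0]]),
- ... torch.tensor([3.]), (2, 3))
- >>> m1 = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]),
- ... torch.tensor([1., 2.]), (2, 2))
- >>> m2 = torch.ones(2, 3)
- >>> torch.sspaddmm(inp, m1, m2).to_dense() # inp + m1 @ m2
- tensor([[4., 1., 1.],
- [2., 2., 2.]])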
- """
- ...
- @overload
- def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor:
- r"""
- sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
-
- Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
- :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
-
- Note: This function is equivalent to :func:`torch.addmm`, except
- :attr:`input` and :attr:`mat1` are sparse.
-
- Args:
- input (Tensor): a sparse matrix to be added
- mat1 (Tensor): a sparse matrix to be matrix multiplied
- mat2 (Tensor): a dense matrix to be matrix multiplied
-
- Keyword args:
- beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
- alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
- out (Tensor, optional): the output tensor.
- """
- ...
- def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- stack(tensors, dim=0, *, out=None) -> Tensor
-
- Concatenates a sequence of tensors along a new dimension.
-
- All tensors need to be of the same size.
-
- .. seealso::
-
- :func:`torch.cat` concatenates the given sequence along an existing dimension.
-
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
- dim (int, optional): dimension to insert. Has to be between 0 and the number
- of dimensions of concatenated tensors (inclusive). Default: 0
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 0.3367, 0.1288, 0.2345],
- [ 0.2303, -1.1229, -0.1863]])
- >>> y = torch.stack((x, x)) # same as torch.stack((x, x), dim=0)
- >>> y
- tensor([[[ 0.3367, 0.1288, 0.2345],
- [ 0.2303, -1.1229, -0.1863]],
-
- [[ 0.3367, 0.1288, 0.2345],
- [ 0.2303, -1.1229, -0.1863]]])
- >>> y.size()
- torch.Size([2, 2, 3])
- >>> torch.stack((x, x), dim=1)
- tensor([[[ 0.3367, 0.1288, 0.2345],
- [ 0.3367, 0.1288, 0.2345]],
-
- [[ 0.2303, -1.1229, -0.1863],
- [ 0.2303, -1.1229, -0.1863]]])
- >>> torch.stack((x, x), dim=2)
- tensor([[[ 0.3367, 0.3367],
- [ 0.1288, 0.1288],
- [ 0.2345, 0.2345]],
-
- [[ 0.2303, 0.2303],
- [-1.1229, -1.1229],
- [-0.1863, -0.1863]]])
- >>> torch.stack((x, x), dim=-1)
- tensor([[[ 0.3367, 0.3367],
- [ 0.1288, 0.1288],
- [ 0.2345, 0.2345]],
-
- [[ 0.2303, 0.2303],
- [-1.1229, -1.1229],
- [-0.1863, -0.1863]]])
- """
- ...
- @overload
- def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the standard deviation over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std(a, dim=1, keepdim=True)
- tensor([[1.0311],
- [0.7477],
- [1.2204],
- [0.9087]])
-
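- A quick check of the ``correction`` semantics (an illustrative sketch;
- values follow from the formula above with ``N = 2``): ``correction=1``
- divides by ``N - 1`` while ``correction=0`` divides by ``N``.
-
- >>> t = torch.tensor([1.0, 3.0])
- >>> torch.std(t, correction=1) # sqrt(((1-2)**2 + (3-2)**2) / (2-1))
- tensor(1.4142)
- >>> torch.std(t, correction=0) # sqrt(2 / 2)
- tensor(1.)
-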
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the standard deviation over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std(a, dim=1, keepdim=True)
- tensor([[1.0311],
- [0.7477],
- [1.2204],
- [0.9087]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std(input: Tensor, unbiased: _bool = True) -> Tensor:
- r"""
- std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the standard deviation over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std(a, dim=1, keepdim=True)
- tensor([[1.0311],
- [0.7477],
- [1.2204],
- [0.9087]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the standard deviation over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std(a, dim=1, keepdim=True)
- tensor([[1.0311],
- [0.7477],
- [1.2204],
- [0.9087]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the standard deviation over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
- dim (int or tuple of ints): the dimension or dimensions to reduce.
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std(a, dim=1, keepdim=True)
- tensor([[1.0311],
- [0.7477],
- [1.2204],
- [0.9087]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the standard deviation and mean over the dimensions specified by
- :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
- ``None`` to reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (std, mean) containing the standard deviation and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std_mean(a, dim=0, keepdim=True)
- (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the standard deviation and mean over the dimensions specified by
- :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
- ``None`` to reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (std, mean) containing the standard deviation and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std_mean(a, dim=0, keepdim=True)
- (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]:
- r"""
- std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the standard deviation and mean over the dimensions specified by
- :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
- ``None`` to reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (std, mean) containing the standard deviation and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std_mean(a, dim=0, keepdim=True)
- (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the standard deviation and mean over the dimensions specified by
- :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
- ``None`` to reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (std, mean) containing the standard deviation and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std_mean(a, dim=0, keepdim=True)
- (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the standard deviation and mean over the dimensions specified by
- :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
- ``None`` to reduce over all dimensions.
-
- The standard deviation (:math:`\sigma`) is calculated as
-
- .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (std, mean) containing the standard deviation and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.std_mean(a, dim=0, keepdim=True)
- (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def sub(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sub(input, other, *, alpha=1, out=None) -> Tensor
-
- Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
-
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor((1, 2))
- >>> b = torch.tensor((0, 1))
- >>> torch.sub(a, b, alpha=2)
- tensor([1, 0])
- """
- ...
- @overload
- def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- sub(input, other, *, alpha=1, out=None) -> Tensor
-
- Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
-
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor((1, 2))
- >>> b = torch.tensor((0, 1))
- >>> torch.sub(a, b, alpha=2)
- tensor([1, 0])
- """
- ...
- @overload
- def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor:
- r"""
- sub(input, other, *, alpha=1, out=None) -> Tensor
-
- Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
-
- .. math::
- \text{out}_i = \text{input}_i - \text{alpha} \times \text{other}_i
-
-
- Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
- :ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
-
- Args:
- input (Tensor): the input tensor.
- other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
-
- Keyword args:
- alpha (Number): the multiplier for :attr:`other`.
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor((1, 2))
- >>> b = torch.tensor((0, 1))
- >>> torch.sub(a, b, alpha=2)
- tensor([1, 0])
- """
- ...
- @overload
- def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
- r"""
- subtract(input, other, *, alpha=1, out=None) -> Tensor
-
- Alias for :func:`torch.sub`.
- """
- ...
- @overload
- def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor:
- r"""
- subtract(input, other, *, alpha=1, out=None) -> Tensor
-
- Alias for :func:`torch.sub`.
- """
- ...
- @overload
- def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor:
- r"""
- sum(input, *, dtype=None) -> Tensor
-
- Returns the sum of all elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.1133, -0.9567, 0.2958]])
- >>> torch.sum(a)
- tensor(-0.5475)
-
- .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the sum of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
- [-0.2993, 0.9138, 0.9337, -1.6864],
- [ 0.1132, 0.7892, -0.1003, 0.5688],
- [ 0.3637, -0.9906, -0.4752, -1.5197]])
- >>> torch.sum(a, 1)
- tensor([-0.4598, -0.1381, 1.3708, -2.6217])
- >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
- >>> torch.sum(b, (2, 1))
- tensor([ 435, 1335, 2235, 3135])
- """
- ...
- @overload
- def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sum(input, *, dtype=None) -> Tensor
-
- Returns the sum of all elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.1133, -0.9567, 0.2958]])
- >>> torch.sum(a)
- tensor(-0.5475)
-
- .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the sum of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
- [-0.2993, 0.9138, 0.9337, -1.6864],
- [ 0.1132, 0.7892, -0.1003, 0.5688],
- [ 0.3637, -0.9906, -0.4752, -1.5197]])
- >>> torch.sum(a, 1)
- tensor([-0.4598, -0.1381, 1.3708, -2.6217])
- >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
- >>> torch.sum(b, (2, 1))
- tensor([ 435, 1335, 2235, 3135])
- """
- ...
- @overload
- def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
- r"""
- sum(input, *, dtype=None) -> Tensor
-
- Returns the sum of all elements in the :attr:`input` tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(1, 3)
- >>> a
- tensor([[ 0.1133, -0.9567, 0.2958]])
- >>> torch.sum(a)
- tensor(-0.5475)
-
- .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
- :noindex:
-
- Returns the sum of each row of the :attr:`input` tensor in the given
- dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
- reduce over all of them.
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- If specified, the input tensor is cast to :attr:`dtype` before the operation
- is performed. This is useful for preventing data type overflows. Default: None.
-
- Example::
-
- >>> a = torch.randn(4, 4)
- >>> a
- tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
- [-0.2993, 0.9138, 0.9337, -1.6864],
- [ 0.1132, 0.7892, -0.1003, 0.5688],
- [ 0.3637, -0.9906, -0.4752, -1.5197]])
- >>> torch.sum(a, 1)
- tensor([-0.4598, -0.1381, 1.3708, -2.6217])
- >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
- >>> torch.sum(b, (2, 1))
- tensor([ 435, 1335, 2235, 3135])
- """
- ...
- def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd:
- r"""
- svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
-
- Computes the singular value decomposition of either a matrix or batch of
- matrices :attr:`input`. The singular value decomposition is represented as a
- namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
- where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
- and the conjugate transpose of `V` for complex inputs.
- If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
- batched with the same batch dimensions as :attr:`input`.
-
- If :attr:`some` is `True` (default), the method returns the reduced singular
- value decomposition. In this case, if the last two dimensions of :attr:`input` are
- `m` and `n`, then the returned `U` and `V` matrices will contain only
- `min(n, m)` orthonormal columns.
-
- If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
- zero-filled matrices of shape `(m, m)` and `(n, n)`
- respectively, on the same device as :attr:`input`. The argument :attr:`some`
- has no effect when :attr:`compute_uv` is `False`.
-
- Supports :attr:`input` of float, double, cfloat and cdouble data types.
- The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
- always be real-valued, even if :attr:`input` is complex.
-
- .. warning::
-
- :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
- and will be removed in a future PyTorch release.
-
- ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
-
- .. code:: python
-
- U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
- V = Vh.mH
-
- ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
-
- .. code:: python
-
- S = torch.linalg.svdvals(A)
-
- .. note:: Differences with :func:`torch.linalg.svd`:
-
- * :attr:`some` is the opposite of
- :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
- default value for both is `True`, so the default behavior is
- effectively the opposite.
- * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
- `Vh`, that is, :math:`V^{\text{H}}`.
- * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
- tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
- empty tensors.
-
- .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
- then the singular values of each matrix in the batch are returned in descending order.
-
- .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
-
- .. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
- and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
- can be arbitrary bases of the corresponding subspaces.
-
- .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
- (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
- on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
- and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
-
- .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
- be represented as a column-major matrix (i.e. Fortran-contiguous).
-
- .. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not
- have zero or repeated singular values.
-
- .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
- `U` and `V` will be numerically unstable, as they depend on
- :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
- has small singular values, as these gradients also depend on `S^{-1}`.
-
- .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
- as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
- The same happens when :attr:`input` has repeated singular values, where one may multiply
- the columns of the spanning subspace in `U` and `V` by a rotation matrix
- and `the resulting vectors will span the same subspace`_.
- Different platforms, like NumPy, or inputs on different device types,
- may produce different `U` and `V` tensors.
-
- Args:
- input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
- batch dimensions consisting of `(m, n)` matrices.
- some (bool, optional): controls whether to compute the reduced or full decomposition, and
- consequently, the shape of returned `U` and `V`. Default: `True`.
- compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
-
- Keyword args:
- out (tuple, optional): the output tuple of tensors
-
- Example::
-
- >>> a = torch.randn(5, 3)
- >>> a
- tensor([[ 0.2364, -0.7752, 0.6372],
- [ 1.7201, 0.7394, -0.0504],
- [-0.3371, -1.0584, 0.5296],
- [ 0.3550, -0.4022, 1.5569],
- [ 0.2445, -0.0158, 1.1414]])
- >>> u, s, v = torch.svd(a)
- >>> u
- tensor([[ 0.4027, 0.0287, 0.5434],
- [-0.1946, 0.8833, 0.3679],
- [ 0.4296, -0.2890, 0.5261],
- [ 0.6604, 0.2717, -0.2618],
- [ 0.4234, 0.2481, -0.4733]])
- >>> s
- tensor([2.3289, 2.0315, 0.7806])
- >>> v
- tensor([[-0.0199, 0.8766, 0.4809],
- [-0.5080, 0.4054, -0.7600],
- [ 0.8611, 0.2594, -0.4373]])
- >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
- tensor(8.6531e-07)
- >>> a_big = torch.randn(7, 5, 3)
- >>> u, s, v = torch.svd(a_big)
- >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
- tensor(2.6503e-06)
-
- .. _the resulting vectors will span the same subspace:
- https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
- """
- ...
- def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor:
- r"""
- swapaxes(input, axis0, axis1) -> Tensor
-
- Alias for :func:`torch.transpose`.
-
- This function is equivalent to NumPy's swapaxes function.
-
- Examples::
-
- >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
- >>> x
- tensor([[[0, 1],
- [2, 3]],
-
- [[4, 5],
- [6, 7]]])
- >>> torch.swapaxes(x, 0, 1)
- tensor([[[0, 1],
- [4, 5]],
-
- [[2, 3],
- [6, 7]]])
- >>> torch.swapaxes(x, 0, 2)
- tensor([[[0, 4],
- [2, 6]],
-
- [[1, 5],
- [3, 7]]])
- """
- ...
- def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor:
- r"""
- swapdims(input, dim0, dim1) -> Tensor
-
- Alias for :func:`torch.transpose`.
-
- This function is equivalent to NumPy's swapaxes function.
-
- Examples::
-
- >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
- >>> x
- tensor([[[0, 1],
- [2, 3]],
-
- [[4, 5],
- [6, 7]]])
- >>> torch.swapdims(x, 0, 1)
- tensor([[[0, 1],
- [4, 5]],
-
- [[2, 3],
- [6, 7]]])
- >>> torch.swapdims(x, 0, 2)
- tensor([[[0, 4],
- [2, 6]],
-
- [[1, 5],
- [3, 7]]])
- """
- ...
- def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
- def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ...
- def t(input: Tensor) -> Tensor:
- r"""
- t(input) -> Tensor
-
- Expects :attr:`input` to be a tensor with at most 2 dimensions and transposes dimensions 0
- and 1.
-
- 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
- is equivalent to ``transpose(input, 0, 1)``.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x = torch.randn(())
- >>> x
- tensor(0.1995)
- >>> torch.t(x)
- tensor(0.1995)
- >>> x = torch.randn(3)
- >>> x
- tensor([ 2.4320, -0.4608, 0.7702])
- >>> torch.t(x)
- tensor([ 2.4320, -0.4608, 0.7702])
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 0.4875, 0.9158, -0.5872],
- [ 0.3938, -0.6929, 0.6932]])
- >>> torch.t(x)
- tensor([[ 0.4875, 0.3938],
- [ 0.9158, -0.6929],
- [-0.5872, 0.6932]])
-
- See also :func:`torch.transpose`.
- """
- ...
- def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.t`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- take(input, index) -> Tensor
-
- Returns a new tensor with the elements of :attr:`input` at the given indices.
- The input tensor is treated as if it were viewed as a 1-D tensor. The result
- takes the same shape as the indices.
-
- Args:
- input (Tensor): the input tensor.
- index (LongTensor): the indices into tensor
-
- Example::
-
- >>> src = torch.tensor([[4, 3, 5],
- ... [6, 7, 8]])
- >>> torch.take(src, torch.tensor([0, 2, 5]))
- tensor([ 4, 5, 8])
- """
- ...
- def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- take_along_dim(input, indices, dim=None, *, out=None) -> Tensor
-
- Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
-
- If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d.
-
- Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
- are designed to work with this function. See the examples below.
-
- .. note::
- This function is similar to NumPy's `take_along_axis`.
- See also :func:`torch.gather`.
-
- Args:
- input (Tensor): the input tensor.
- indices (Tensor): the indices into :attr:`input`. Must have long dtype.
- dim (int, optional): dimension to select along.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
- >>> max_idx = torch.argmax(t)
- >>> torch.take_along_dim(t, max_idx)
- tensor([60])
- >>> sorted_idx = torch.argsort(t, dim=1)
- >>> torch.take_along_dim(t, sorted_idx, dim=1)
- tensor([[10, 20, 30],
- [40, 50, 60]])
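- 
- When :attr:`dim` is given and :attr:`indices` has the same number of dimensions as
- :attr:`input`, the call above is closely related to :func:`torch.gather` (a small
- sketch, reusing ``t`` and ``sorted_idx``)::
- 
- >>> torch.gather(t, 1, sorted_idx)
- tensor([[10, 20, 30],
- [40, 50, 60]])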
- """
- ...
- def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- tan(input, *, out=None) -> Tensor
-
- Returns a new tensor with the tangent of the elements of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \tan(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([-1.2027, -1.7687, 0.4412, -1.3856])
- >>> torch.tan(a)
- tensor([-2.5930, 4.9859, 0.4722, -5.3366])
- """
- ...
- def tan_(input: Tensor) -> Tensor: ...
- def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- tanh(input, *, out=None) -> Tensor
-
- Returns a new tensor with the hyperbolic tangent of the elements
- of :attr:`input`.
-
- .. math::
- \text{out}_{i} = \tanh(\text{input}_{i})
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
- >>> torch.tanh(a)
- tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
- """
- ...
- def tanh_(input: Tensor) -> Tensor: ...
- def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
- r"""
- tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
-
- Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
-
- .. warning::
-
- When working with tensors prefer using :func:`torch.Tensor.clone`,
- :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
- readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
- ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
- is equivalent to ``t.clone().detach().requires_grad_(True)``.
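- 
- A minimal doctest-style check of that equivalence::
- 
- >>> t = torch.tensor([1., 2.])
- >>> torch.tensor(t).equal(t.clone().detach())
- True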
-
- .. seealso::
-
- :func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
- :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
-
- Args:
- data (array_like): Initial data for the tensor. Can be a list, tuple,
- NumPy ``ndarray``, scalar, and other types.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, infers data type from :attr:`data`.
- device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
- then the device of data is used. If None and data is not a tensor then
- the result tensor is constructed on the current device.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- pin_memory (bool, optional): If set, the returned tensor is allocated in
- pinned memory. Works only for CPU tensors. Default: ``False``.
-
-
- Example::
-
- >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
- tensor([[ 0.1000, 1.2000],
- [ 2.2000, 3.1000],
- [ 4.9000, 5.2000]])
-
- >>> torch.tensor([0, 1]) # Type inference on data
- tensor([ 0, 1])
-
- >>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
- ... dtype=torch.float64,
- ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
- tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
-
- >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
- tensor(3.1416)
-
- >>> torch.tensor([]) # Create an empty tensor (of size (0,))
- tensor([])
- """
- ...
- @overload
- def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
-
- Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
- along dimension :attr:`dim` according to the indices or number of sections specified
- by :attr:`indices_or_sections`. This function is based on NumPy's
- :func:`numpy.array_split`.
-
- Args:
- input (Tensor): the tensor to split
- indices_or_sections (Tensor, int or list or tuple of ints):
- If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
- with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
- If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
- section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
- is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
- sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
- have size :code:`int(input.size(dim) / n)`.
-
- If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
- tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
- in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
- would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
-
- If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
- long tensor on the CPU.
-
- dim (int, optional): dimension along which to split the tensor. Default: ``0``
-
- Example::
-
- >>> x = torch.arange(8)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
-
- >>> x = torch.arange(7)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
- >>> torch.tensor_split(x, (1, 6))
- (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
-
- >>> x = torch.arange(14).reshape(2, 7)
- >>> x
- tensor([[ 0, 1, 2, 3, 4, 5, 6],
- [ 7, 8, 9, 10, 11, 12, 13]])
- >>> torch.tensor_split(x, 3, dim=1)
- (tensor([[0, 1, 2],
- [7, 8, 9]]),
- tensor([[ 3, 4],
- [10, 11]]),
- tensor([[ 5, 6],
- [12, 13]]))
- >>> torch.tensor_split(x, (1, 6), dim=1)
- (tensor([[0],
- [7]]),
- tensor([[ 1, 2, 3, 4, 5],
- [ 8, 9, 10, 11, 12]]),
- tensor([[ 6],
- [13]]))
- """
- ...
- @overload
- def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
-
- Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
- along dimension :attr:`dim` according to the indices or number of sections specified
- by :attr:`indices_or_sections`. This function is based on NumPy's
- :func:`numpy.array_split`.
-
- Args:
- input (Tensor): the tensor to split
- indices_or_sections (Tensor, int or list or tuple of ints):
- If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
- with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
- If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
- section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
- is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
- sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
- have size :code:`int(input.size(dim) / n)`.
-
- If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
- tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
- in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
- would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
-
- If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
- long tensor on the CPU.
-
- dim (int, optional): dimension along which to split the tensor. Default: ``0``
-
- Example::
-
- >>> x = torch.arange(8)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
-
- >>> x = torch.arange(7)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
- >>> torch.tensor_split(x, (1, 6))
- (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
-
- >>> x = torch.arange(14).reshape(2, 7)
- >>> x
- tensor([[ 0, 1, 2, 3, 4, 5, 6],
- [ 7, 8, 9, 10, 11, 12, 13]])
- >>> torch.tensor_split(x, 3, dim=1)
- (tensor([[0, 1, 2],
- [7, 8, 9]]),
- tensor([[ 3, 4],
- [10, 11]]),
- tensor([[ 5, 6],
- [12, 13]]))
- >>> torch.tensor_split(x, (1, 6), dim=1)
- (tensor([[0],
- [7]]),
- tensor([[ 1, 2, 3, 4, 5],
- [ 8, 9, 10, 11, 12]]),
- tensor([[ 6],
- [13]]))
- """
- ...
- @overload
- def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
-
- Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
- along dimension :attr:`dim` according to the indices or number of sections specified
- by :attr:`indices_or_sections`. This function is based on NumPy's
- :func:`numpy.array_split`.
-
- Args:
- input (Tensor): the tensor to split
- indices_or_sections (Tensor, int or list or tuple of ints):
- If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
- with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
- If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
- section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
- is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
- sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
- have size :code:`int(input.size(dim) / n)`.
-
- If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
- tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
- in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
- would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
-
- If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
- long tensor on the CPU.
-
- dim (int, optional): dimension along which to split the tensor. Default: ``0``
-
- Example::
-
- >>> x = torch.arange(8)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
-
- >>> x = torch.arange(7)
- >>> torch.tensor_split(x, 3)
- (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
- >>> torch.tensor_split(x, (1, 6))
- (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
-
- >>> x = torch.arange(14).reshape(2, 7)
- >>> x
- tensor([[ 0, 1, 2, 3, 4, 5, 6],
- [ 7, 8, 9, 10, 11, 12, 13]])
- >>> torch.tensor_split(x, 3, dim=1)
- (tensor([[0, 1, 2],
- [7, 8, 9]]),
- tensor([[ 3, 4],
- [10, 11]]),
- tensor([[ 5, 6],
- [12, 13]]))
- >>> torch.tensor_split(x, (1, 6), dim=1)
- (tensor([[0],
- [7]]),
- tensor([[ 1, 2, 3, 4, 5],
- [ 8, 9, 10, 11, 12]]),
- tensor([[ 6],
- [13]]))
- """
- ...
- def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ...
- def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ...
- def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- tile(input, dims) -> Tensor
-
- Constructs a tensor by repeating the elements of :attr:`input`.
- The :attr:`dims` argument specifies the number of repetitions
- in each dimension.
-
- If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
- ones are prepended to :attr:`dims` until all dimensions are specified.
- For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
- is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
-
- Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
- specifies, then :attr:`input` is treated as if it were unsqueezed at
- dimension zero until it has as many dimensions as :attr:`dims` specifies.
- For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
- is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
- shape (1, 1, 4, 2).
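- 
- A shape-only sketch of the two rules above::
- 
- >>> torch.tile(torch.empty(8, 6, 4, 2), (2, 2)).shape
- torch.Size([8, 6, 8, 4])
- >>> torch.tile(torch.empty(4, 2), (3, 3, 2, 2)).shape
- torch.Size([3, 3, 8, 4])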
-
- .. note::
-
- This function is similar to NumPy's tile function.
-
- Args:
- input (Tensor): the tensor whose elements to repeat.
- dims (tuple): the number of repetitions per dimension.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3])
- >>> x.tile((2,))
- tensor([1, 2, 3, 1, 2, 3])
- >>> y = torch.tensor([[1, 2], [3, 4]])
- >>> torch.tile(y, (2, 2))
- tensor([[1, 2, 1, 2],
- [3, 4, 3, 4],
- [1, 2, 1, 2],
- [3, 4, 3, 4]])
- """
- ...
- def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk:
- r"""
- topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
-
- Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
- a given dimension.
-
- If :attr:`dim` is not given, the last dimension of the `input` is chosen.
-
- If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
-
- A namedtuple of `(values, indices)` is returned with the `values` and
- `indices` of the largest `k` elements of each row of the `input` tensor in the
- given dimension `dim`.
-
- If the boolean option :attr:`sorted` is ``True``, the returned
- `k` elements are themselves sorted.
-
- Args:
- input (Tensor): the input tensor.
- k (int): the k in "top-k"
- dim (int, optional): the dimension to sort along
- largest (bool, optional): controls whether to return largest or
- smallest elements
- sorted (bool, optional): controls whether to return the elements
- in sorted order
-
- Keyword args:
- out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
- optionally given to be used as output buffers
-
- Example::
-
- >>> x = torch.arange(1., 6.)
- >>> x
- tensor([ 1., 2., 3., 4., 5.])
- >>> torch.topk(x, 3)
- torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
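- 
- And the smallest-`k` variant, reusing the same ``x``::
- 
- >>> torch.topk(x, 3, largest=False)
- torch.return_types.topk(values=tensor([1., 2., 3.]), indices=tensor([0, 1, 2]))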
- """
- ...
- def trace(input: Tensor) -> Tensor:
- r"""
- trace(input) -> Tensor
-
- Returns the sum of the elements of the diagonal of the input 2-D matrix.
-
- Example::
-
- >>> x = torch.arange(1., 10.).view(3, 3)
- >>> x
- tensor([[ 1., 2., 3.],
- [ 4., 5., 6.],
- [ 7., 8., 9.]])
- >>> torch.trace(x)
- tensor(15.)
- """
- ...
- @overload
- def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor:
- r"""
- transpose(input, dim0, dim1) -> Tensor
-
- Returns a tensor that is a transposed version of :attr:`input`.
- The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
-
- If :attr:`input` is a strided tensor then the resulting :attr:`out`
- tensor shares its underlying storage with the :attr:`input` tensor, so
- changing the content of one would change the content of the other.
-
- If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
- resulting :attr:`out` tensor *does not* share the underlying storage
- with the :attr:`input` tensor.
-
- If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
- layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
- :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
- both be sparse dimensions. The batch dimensions of a sparse tensor are the
- dimensions preceding the sparse dimensions.
-
- .. note::
- Transpositions which interchange the sparse dimensions of a `SparseCSR`
- or `SparseCSC` layout tensor will result in the layout changing between
- the two options. Transposition of the sparse dimensions of a `SparseBSR`
- or `SparseBSC` layout tensor will likewise generate a result with the
- opposite layout.
-
-
- Args:
- input (Tensor): the input tensor.
- dim0 (int): the first dimension to be transposed
- dim1 (int): the second dimension to be transposed
-
- Example::
-
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 1.0028, -0.9893, 0.5809],
- [-0.1669, 0.7299, 0.4942]])
- >>> torch.transpose(x, 0, 1)
- tensor([[ 1.0028, -0.1669],
- [-0.9893, 0.7299],
- [ 0.5809, 0.4942]])
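- 
- Because the strided output shares storage with :attr:`input`, a write through the
- transposed view is visible in ``x`` as well (a small illustration)::
- 
- >>> y = torch.transpose(x, 0, 1)
- >>> y[0, 1] = 42.
- >>> x[1, 0]
- tensor(42.)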
-
- See also :func:`torch.t`.
- """
- ...
- @overload
- def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor:
- r"""
- transpose(input, dim0, dim1) -> Tensor
-
- Returns a tensor that is a transposed version of :attr:`input`.
- The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
-
- If :attr:`input` is a strided tensor then the resulting :attr:`out`
- tensor shares its underlying storage with the :attr:`input` tensor, so
- changing the content of one would change the content of the other.
-
- If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
- resulting :attr:`out` tensor *does not* share the underlying storage
- with the :attr:`input` tensor.
-
- If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
- layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
- :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
- both be sparse dimensions. The batch dimensions of a sparse tensor are the
- dimensions preceding the sparse dimensions.
-
- .. note::
- Transpositions which interchange the sparse dimensions of a `SparseCSR`
- or `SparseCSC` layout tensor will result in the layout changing between
- the two options. Transposition of the sparse dimensions of a `SparseBSR`
- or `SparseBSC` layout tensor will likewise generate a result with the
- opposite layout.
-
-
- Args:
- input (Tensor): the input tensor.
- dim0 (int): the first dimension to be transposed
- dim1 (int): the second dimension to be transposed
-
- Example::
-
- >>> x = torch.randn(2, 3)
- >>> x
- tensor([[ 1.0028, -0.9893, 0.5809],
- [-0.1669, 0.7299, 0.4942]])
- >>> torch.transpose(x, 0, 1)
- tensor([[ 1.0028, -0.1669],
- [-0.9893, 0.7299],
- [ 0.5809, 0.4942]])
-
- See also :func:`torch.t`.
- """
- ...
- def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.transpose`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
- r"""
- trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
-
- Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
- :attr:`dim`. By default the spacing between elements is assumed to be 1, but
- :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
- used to specify arbitrary spacing along :attr:`dim`.
-
-
- Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
- the default computation is
-
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
- \end{aligned}
-
- When :attr:`dx` is specified the computation becomes
-
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
- \end{aligned}
-
- effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
- assuming :attr:`x` is also a one-dimensional tensor with
- elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
-
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
- \end{aligned}
-
- When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
- When their sizes differ, the broadcasting behavior is as follows. For both :attr:`x`
- and :attr:`y`, the function computes the difference between consecutive elements along
- dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
- the same shape as the original tensors except that their lengths along the dimension :attr:`dim` are reduced by 1.
- After that, those two tensors are broadcast together to compute the final output as part of the trapezoidal rule.
- See the examples below for details.
-
- .. note::
- The trapezoidal rule is a technique for approximating the definite integral of a function
- by averaging its left and right Riemann sums. The approximation becomes more accurate as
- the resolution of the partition increases.
-
- Arguments:
- y (Tensor): Values to use when computing the trapezoidal rule.
- x (Tensor): If specified, defines spacing between values as specified above.
-
- Keyword arguments:
- dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
- is specified then this defaults to 1. Effectively multiplies the result by its value.
- dim (int): The dimension along which to compute the trapezoidal rule.
- The last (inner-most) dimension by default.
-
- Examples::
-
- >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
- >>> y = torch.tensor([1, 5, 10])
- >>> torch.trapezoid(y)
- tensor(10.5)
-
- >>> # Computes the same trapezoidal rule directly to verify
- >>> (1 + 10 + 10) / 2
- 10.5
-
- >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
- >>> # NOTE: the result is the same as before, but multiplied by 2
- >>> torch.trapezoid(y, dx=2)
- tensor(21.0)
-
- >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.trapezoid(y, x)
- tensor(28.5)
-
- >>> # Computes the same trapezoidal rule directly to verify
- >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
- 28.5
-
- >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
- >>> y = torch.arange(9).reshape(3, 3)
- >>> y
- tensor([[0, 1, 2],
- [3, 4, 5],
- [6, 7, 8]])
- >>> torch.trapezoid(y)
- tensor([ 2., 8., 14.])
-
- >>> # Computes the trapezoidal rule for each column of the matrix
- >>> torch.trapezoid(y, dim=0)
- tensor([ 6., 8., 10.])
-
- >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with the same arbitrary spacing
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.trapezoid(y, x)
- tensor([5., 5., 5.])
-
- >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with different arbitrary spacing per row
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
- >>> torch.trapezoid(y, x)
- tensor([2., 4., 6.])
- """
- ...
- @overload
- def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor:
- r"""
- trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
-
- Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
- :attr:`dim`. By default the spacing between elements is assumed to be 1, but
- :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
- used to specify arbitrary spacing along :attr:`dim`.
-
-
- Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
- the default computation is
-
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
- \end{aligned}
-
- When :attr:`dx` is specified the computation becomes
-
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
- \end{aligned}
-
- effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
- assuming :attr:`x` is also a one-dimensional tensor with
- elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
-
- .. math::
- \begin{aligned}
- \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
- \end{aligned}
-
- When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
- When their sizes differ, the broadcasting behavior is as follows. For both :attr:`x`
- and :attr:`y`, the function computes the difference between consecutive elements along
- dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
- the same shape as the original tensors except that their lengths along the dimension :attr:`dim` are reduced by 1.
- After that, those two tensors are broadcast together to compute the final output as part of the trapezoidal rule.
- See the examples below for details.
-
- .. note::
- The trapezoidal rule is a technique for approximating the definite integral of a function
- by averaging its left and right Riemann sums. The approximation becomes more accurate as
- the resolution of the partition increases.
-
- Arguments:
- y (Tensor): Values to use when computing the trapezoidal rule.
- x (Tensor): If specified, defines spacing between values as specified above.
-
- Keyword arguments:
- dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
- is specified then this defaults to 1. Effectively multiplies the result by its value.
- dim (int): The dimension along which to compute the trapezoidal rule.
- The last (inner-most) dimension by default.
-
- Examples::
-
- >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
- >>> y = torch.tensor([1, 5, 10])
- >>> torch.trapezoid(y)
- tensor(10.5)
-
- >>> # Computes the same trapezoidal rule directly to verify
- >>> (1 + 10 + 10) / 2
- 10.5
-
- >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
- >>> # NOTE: the result is the same as before, but multiplied by 2
- >>> torch.trapezoid(y, dx=2)
- tensor(21.0)
-
- >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.trapezoid(y, x)
- tensor(28.5)
-
- >>> # Computes the same trapezoidal rule directly to verify
- >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
- 28.5
-
- >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
- >>> y = torch.arange(9).reshape(3, 3)
- >>> y
- tensor([[0, 1, 2],
- [3, 4, 5],
- [6, 7, 8]])
- >>> torch.trapezoid(y)
- tensor([ 2., 8., 14.])
-
- >>> # Computes the trapezoidal rule for each column of the matrix
- >>> torch.trapezoid(y, dim=0)
- tensor([ 6., 8., 10.])
-
- >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with the same arbitrary spacing
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([1, 3, 6])
- >>> torch.trapezoid(y, x)
- tensor([5., 5., 5.])
-
- >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
- >>> # with different arbitrary spacing per row
- >>> y = torch.ones(3, 3)
- >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
- >>> torch.trapezoid(y, x)
- tensor([2., 4., 6.])
- """
- ...
- @overload
- def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor:
- r"""
- trapz(y, x, *, dim=-1) -> Tensor
-
- Alias for :func:`torch.trapezoid`.
- """
- ...
- @overload
- def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor:
- r"""
- trapz(y, x, *, dim=-1) -> Tensor
-
- Alias for :func:`torch.trapezoid`.
- """
- ...
- def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve:
- r"""
- triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
-
- Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
- and multiple right-hand sides :math:`b`.
-
- In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
- (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
-
- `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
- batches of 2D matrices. If the inputs are batches, then it returns
- batched outputs `X`.
-
- If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
- :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
- the result may contain ``NaN`` values.
-
- Supports input of float, double, cfloat and cdouble data types.
-
- .. warning::
-
- :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
- and will be removed in a future PyTorch release.
- :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
- copy of one of the inputs.
-
- ``X = torch.triangular_solve(B, A).solution`` should be replaced with
-
- .. code:: python
-
- X = torch.linalg.solve_triangular(A, B, upper=True)
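- 
- For the non-default flags, a rough mapping (a sketch; it assumes an upper-triangular
- ``A`` and uses the fact that ``upper`` is a required keyword of
- :func:`torch.linalg.solve_triangular`):
- 
- .. code:: python
- 
- # upper=False corresponds to a lower-triangular A
- X = torch.linalg.solve_triangular(A, B, upper=False)
- # transpose=True solves op(A) X = B with op(A) = A^T;
- # the transpose of an upper-triangular A is lower-triangular
- X = torch.linalg.solve_triangular(A.mT, B, upper=False)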
-
- Args:
- b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
- :math:`*` is zero or more batch dimensions
- A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
- where :math:`*` is zero or more batch dimensions
- upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
- transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
- and `op(A) = A` if it is ``False``. Default: ``False``.
- unitriangular (bool, optional): whether :math:`A` is unit triangular.
- If True, the diagonal elements of :math:`A` are assumed to be
- 1 and not referenced from :math:`A`. Default: ``False``.
-
- Keyword args:
- out ((Tensor, Tensor), optional): tuple of two tensors to write
- the output to. Ignored if `None`. Default: `None`.
-
- Returns:
- A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
- is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
- (or whatever variant of the system of equations, depending on the keyword arguments.)
-
- Examples::
-
- >>> A = torch.randn(2, 2).triu()
- >>> A
- tensor([[ 1.1527, -1.0753],
- [ 0.0000, 0.7986]])
- >>> b = torch.randn(2, 3)
- >>> b
- tensor([[-0.0210, 2.3513, -1.5492],
- [ 1.5429, 0.7403, -1.0243]])
- >>> torch.triangular_solve(b, A)
- torch.return_types.triangular_solve(
- solution=tensor([[ 1.7841, 2.9046, -2.5405],
- [ 1.9320, 0.9270, -1.2826]]),
- cloned_coefficient=tensor([[ 1.1527, -1.0753],
- [ 0.0000, 0.7986]]))
- """
- ...
- def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- tril(input, diagonal=0, *, out=None) -> Tensor
-
- Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
- :attr:`input`; the other elements of the result tensor :attr:`out` are set to 0.
-
- The lower triangular part of the matrix is defined as the elements on and
- below the diagonal.
-
- The argument :attr:`diagonal` controls which diagonal to consider. If
- :attr:`diagonal` = 0, all elements on and below the main diagonal are
- retained. A positive value includes just as many diagonals above the main
- diagonal, and similarly a negative value excludes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
- :math:`d_{1}, d_{2}` are the dimensions of the matrix.
-
- Args:
- input (Tensor): the input tensor.
- diagonal (int, optional): the diagonal to consider
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[-1.0813, -0.8619, 0.7105],
- [ 0.0935, 0.1380, 2.2112],
- [-0.3409, -0.9828, 0.0289]])
- >>> torch.tril(a)
- tensor([[-1.0813, 0.0000, 0.0000],
- [ 0.0935, 0.1380, 0.0000],
- [-0.3409, -0.9828, 0.0289]])
-
- >>> b = torch.randn(4, 6)
- >>> b
- tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
- [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
- [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
- [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
- >>> torch.tril(b, diagonal=1)
- tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
- [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
- [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
- [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
- >>> torch.tril(b, diagonal=-1)
- tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
- [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
- [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
- [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
- """
- ...
- def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
-
- Returns the indices of the lower triangular part of a :attr:`row`-by-
- :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
- coordinates of all indices and the second row contains column coordinates.
- Indices are ordered based on rows and then columns.
-
- The lower triangular part of the matrix is defined as the elements on and
- below the diagonal.
-
- The argument :attr:`offset` controls which diagonal to consider. If
- :attr:`offset` = 0, all elements on and below the main diagonal are
- retained. A positive value includes just as many diagonals above the main
- diagonal, and similarly a negative value excludes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
- where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
-
- .. note::
- When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
- prevent overflow during calculation.
-
- Args:
- row (``int``): number of rows in the 2-D matrix.
- col (``int``): number of columns in the 2-D matrix.
- offset (``int``): diagonal offset from the main diagonal.
- Default: if not provided, 0.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, ``torch.long``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.
-
- Example::
-
- >>> a = torch.tril_indices(3, 3)
- >>> a
- tensor([[0, 1, 1, 2, 2, 2],
- [0, 0, 1, 0, 1, 2]])
-
- >>> a = torch.tril_indices(4, 3, -1)
- >>> a
- tensor([[1, 2, 2, 3, 3, 3],
- [0, 0, 1, 0, 1, 2]])
-
- >>> a = torch.tril_indices(4, 3, 1)
- >>> a
- tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
- [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
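- 
- The returned coordinates can be used directly for advanced indexing, e.g. to pull
- out the lower-triangular elements of a matrix (a small illustration)::
- 
- >>> m = torch.arange(9).reshape(3, 3)
- >>> idx = torch.tril_indices(3, 3)
- >>> m[idx[0], idx[1]]
- tensor([0, 3, 4, 6, 7, 8])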
- """
- ...
- def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ...
- def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- triu(input, diagonal=0, *, out=None) -> Tensor
-
- Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
- :attr:`input`; the other elements of the result tensor :attr:`out` are set to 0.
-
- The upper triangular part of the matrix is defined as the elements on and
- above the diagonal.
-
- The argument :attr:`diagonal` controls which diagonal to consider. If
- :attr:`diagonal` = 0, all elements on and above the main diagonal are
- retained. A positive value excludes just as many diagonals above the main
- diagonal, and similarly a negative value includes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
- :math:`d_{1}, d_{2}` are the dimensions of the matrix.
-
- Args:
- input (Tensor): the input tensor.
- diagonal (int, optional): the diagonal to consider
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(3, 3)
- >>> a
- tensor([[ 0.2309, 0.5207, 2.0049],
- [ 0.2072, -1.0680, 0.6602],
- [ 0.3480, -0.5211, -0.4573]])
- >>> torch.triu(a)
- tensor([[ 0.2309, 0.5207, 2.0049],
- [ 0.0000, -1.0680, 0.6602],
- [ 0.0000, 0.0000, -0.4573]])
- >>> torch.triu(a, diagonal=1)
- tensor([[ 0.0000, 0.5207, 2.0049],
- [ 0.0000, 0.0000, 0.6602],
- [ 0.0000, 0.0000, 0.0000]])
- >>> torch.triu(a, diagonal=-1)
- tensor([[ 0.2309, 0.5207, 2.0049],
- [ 0.2072, -1.0680, 0.6602],
- [ 0.0000, -0.5211, -0.4573]])
-
- >>> b = torch.randn(4, 6)
- >>> b
- tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
- [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
- [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
- [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
- >>> torch.triu(b, diagonal=1)
- tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
- [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
- [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
- [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
- >>> torch.triu(b, diagonal=-1)
- tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
- [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
- [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
- [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
- """
- ...
- def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
-
- Returns the indices of the upper triangular part of a :attr:`row` by
- :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
- coordinates of all indices and the second row contains column coordinates.
- Indices are ordered based on rows and then columns.
-
- The upper triangular part of the matrix is defined as the elements on and
- above the diagonal.
-
- The argument :attr:`offset` controls which diagonal to consider. If
- :attr:`offset` = 0, all elements on and above the main diagonal are
- retained. A positive value excludes just as many diagonals above the main
- diagonal, and similarly a negative value includes just as many diagonals below
- the main diagonal. The main diagonal is the set of indices
- :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
- where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
-
- .. note::
- When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
- prevent overflow during calculation.
-
- Args:
- row (``int``): number of rows in the 2-D matrix.
- col (``int``): number of columns in the 2-D matrix.
- offset (``int``): diagonal offset from the main diagonal.
- Default: if not provided, 0.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, ``torch.long``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.
-
- Example::
-
- >>> a = torch.triu_indices(3, 3)
- >>> a
- tensor([[0, 0, 0, 1, 1, 2],
- [0, 1, 2, 1, 2, 2]])
-
- >>> a = torch.triu_indices(4, 3, -1)
- >>> a
- tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
- [0, 1, 2, 0, 1, 2, 1, 2, 2]])
-
- >>> a = torch.triu_indices(4, 3, 1)
- >>> a
- tensor([[0, 0, 1],
- [1, 2, 2]])
- """
- ...
- def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- true_divide(dividend, divisor, *, out) -> Tensor
-
- Alias for :func:`torch.div` with ``rounding_mode=None``.
- """
- ...
- def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- trunc(input, *, out=None) -> Tensor
-
- Returns a new tensor with the truncated integer values of
- the elements of :attr:`input`.
-
- For integer inputs, follows the array-api convention of returning a
- copy of the input tensor.
-
- Args:
- input (Tensor): the input tensor.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.randn(4)
- >>> a
- tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
- >>> torch.trunc(a)
- tensor([ 3., 0., -0., -0.])
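- 
- For integer inputs the values are returned unchanged, but as a copy::
- 
- >>> torch.trunc(torch.tensor([1, 2, 3]))
- tensor([1, 2, 3])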
- """
- ...
- def trunc_(input: Tensor) -> Tensor: ...
- @overload
- def unbind(input: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- unbind(input, dim=0) -> seq
-
- Removes a tensor dimension.
-
- Returns a tuple of all slices along a given dimension, already without it.
-
- Arguments:
- input (Tensor): the tensor to unbind
- dim (int): dimension to remove
-
- Example::
-
- >>> torch.unbind(torch.tensor([[1, 2, 3],
- ... [4, 5, 6],
- ... [7, 8, 9]]))
- (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
- """
- ...
- @overload
- def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]:
- r"""
- unbind(input, dim=0) -> seq
-
- Removes a tensor dimension.
-
- Returns a tuple of all slices along a given dimension, already without it.
-
- Arguments:
- input (Tensor): the tensor to unbind
- dim (int): dimension to remove
-
- Example::
-
- >>> torch.unbind(torch.tensor([[1, 2, 3],
- ... [4, 5, 6],
- ... [7, 8, 9]]))
- (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
- """
- ...
- def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None:
- r"""
- Performs the same operation as :func:`torch.unbind`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor:
- r"""
- unflatten(input, dim, sizes) -> Tensor
-
- Expands a dimension of the input tensor over multiple dimensions.
-
- .. seealso::
-
- :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): Dimension to be unflattened, specified as an index into
- ``input.shape``.
- sizes (Tuple[int]): New shape of the unflattened dimension.
- One of its elements can be `-1` in which case the corresponding output
- dimension is inferred. Otherwise, the product of ``sizes`` *must*
- equal ``input.shape[dim]``.
-
- Returns:
- A view of :attr:`input` with the specified dimension unflattened.
-
- Examples::
- >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
- torch.Size([3, 2, 2, 1])
- >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
- torch.Size([3, 2, 2, 1])
- >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
- torch.Size([5, 2, 2, 3, 1, 1, 3])
- """
- ...
- @overload
- def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor:
- r"""
- unflatten(input, dim, sizes) -> Tensor
-
- Expands a dimension of the input tensor over multiple dimensions.
-
- .. seealso::
-
- :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): Dimension to be unflattened, specified as an index into
- ``input.shape``.
- sizes (Tuple[int]): New shape of the unflattened dimension.
- One of its elements can be `-1` in which case the corresponding output
- dimension is inferred. Otherwise, the product of ``sizes`` *must*
- equal ``input.shape[dim]``.
-
- Returns:
- A view of :attr:`input` with the specified dimension unflattened.
-
- Examples::
- >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
- torch.Size([3, 2, 2, 1])
- >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
- torch.Size([3, 2, 2, 1])
- >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
- torch.Size([5, 2, 2, 3, 1, 1, 3])
- """
- ...
- def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.unfold`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ...
- def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- unsafe_chunk(input, chunks, dim=0) -> List of Tensors
-
- Works like :func:`torch.chunk` but without enforcing the autograd restrictions
- on inplace modification of the outputs.
-
- .. warning::
- This function is safe to use as long as only the input, or only the outputs
- are modified inplace after calling this function. It is the user's
- responsibility to ensure that is the case. If both the input and one or more
- of the outputs are modified inplace, gradients computed by autograd will be
- silently incorrect.
- """
- ...
- def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]:
- r"""
- unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
-
- Works like :func:`torch.split` but without enforcing the autograd restrictions
- on inplace modification of the outputs.
-
- .. warning::
- This function is safe to use as long as only the input, or only the outputs
- are modified inplace after calling this function. It is the user's
- responsibility to ensure that is the case. If both the input and one or more
- of the outputs are modified inplace, gradients computed by autograd will be
- silently incorrect.
- """
- ...
- def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ...
- def unsqueeze(input: Tensor, dim: _int) -> Tensor:
- r"""
- unsqueeze(input, dim) -> Tensor
-
- Returns a new tensor with a dimension of size one inserted at the
- specified position.
-
- The returned tensor shares the same underlying data with this tensor.
-
- A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
- can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
- applied at :attr:`dim` = ``dim + input.dim() + 1``.
-
- Args:
- input (Tensor): the input tensor.
- dim (int): the index at which to insert the singleton dimension
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3, 4])
- >>> torch.unsqueeze(x, 0)
- tensor([[ 1, 2, 3, 4]])
- >>> torch.unsqueeze(x, 1)
- tensor([[ 1],
- [ 2],
- [ 3],
- [ 4]])
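- 
- A negative :attr:`dim` follows the rule above; for this 1-D input, ``dim=-1`` is
- the same as ``dim=1``::
- 
- >>> torch.unsqueeze(x, -1)
- tensor([[ 1],
- [ 2],
- [ 3],
- [ 4]])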
- """
- ...
- def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.values`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor:
- r"""
- vander(x, N=None, increasing=False) -> Tensor
-
- Generates a Vandermonde matrix.
-
- The columns of the output matrix are elementwise powers of the input vector :math:`x^{(N-1)}, x^{(N-2)}, ..., x^0`.
- If increasing is True, the order of the columns is reversed: :math:`x^0, x^1, ..., x^{(N-1)}`. Such a
- matrix with a geometric progression in each row is named for Alexandre-Théophile Vandermonde.
-
- Arguments:
- x (Tensor): 1-D input tensor.
- N (int, optional): Number of columns in the output. If N is not specified,
- a square array is returned :math:`(N = len(x))`.
- increasing (bool, optional): Order of the powers of the columns. If True,
- the powers increase from left to right, if False (the default) they are reversed.
-
- Returns:
- Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{(N-1)}`,
- the second :math:`x^{(N-2)}` and so forth. If increasing is True, the columns
- are :math:`x^0, x^1, ..., x^{(N-1)}`.
-
- Example::
-
- >>> x = torch.tensor([1, 2, 3, 5])
- >>> torch.vander(x)
- tensor([[ 1, 1, 1, 1],
- [ 8, 4, 2, 1],
- [ 27, 9, 3, 1],
- [125, 25, 5, 1]])
- >>> torch.vander(x, N=3)
- tensor([[ 1, 1, 1],
- [ 4, 2, 1],
- [ 9, 3, 1],
- [25, 5, 1]])
- >>> torch.vander(x, N=3, increasing=True)
- tensor([[ 1, 1, 1],
- [ 1, 2, 4],
- [ 1, 3, 9],
- [ 1, 5, 25]])
- """
- ...
- @overload
- def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
- can be a single dimension, list of dimensions, or ``None`` to reduce over all
- dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var(a, dim=1, keepdim=True)
- tensor([[1.0631],
- [0.5590],
- [1.4893],
- [0.8258]])
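- 
- A small worked check of how :attr:`correction` changes the denominator
- (here :math:`N = 4` and the squared deviations from the mean sum to 5)::
- 
- >>> t = torch.tensor([1., 2., 3., 4.])
- >>> torch.var(t)  # correction=1: 5 / (4 - 1)
- tensor(1.6667)
- >>> torch.var(t, correction=0)  # population variance: 5 / 4
- tensor(1.2500)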
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
- can be a single dimension, list of dimensions, or ``None`` to reduce over all
- dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var(a, dim=1, keepdim=True)
- tensor([[1.0631],
- [0.5590],
- [1.4893],
- [0.8258]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var(input: Tensor, unbiased: _bool = True) -> Tensor:
- r"""
- var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
- can be a single dimension, list of dimensions, or ``None`` to reduce over all
- dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var(a, dim=1, keepdim=True)
- tensor([[1.0631],
- [0.5590],
- [1.4893],
- [0.8258]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor:
- r"""
- var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
- can be a single dimension, list of dimensions, or ``None`` to reduce over all
- dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var(a, dim=1, keepdim=True)
- tensor([[1.0631],
- [0.5590],
- [1.4893],
- [0.8258]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
-
- Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
- can be a single dimension, list of dimensions, or ``None`` to reduce over all
- dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var(a, dim=1, keepdim=True)
- tensor([[1.0631],
- [0.5590],
- [1.4893],
- [0.8258]])
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the variance and mean over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (var, mean) containing the variance and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var_mean(a, dim=0, keepdim=True)
- (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
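- The returned pair matches calling :func:`torch.var` and :func:`torch.mean`
- separately (a quick illustrative check):
- 
- >>> x = torch.tensor([1.0, 2.0, 3.0, 4.0])
- >>> torch.var_mean(x)
- (tensor(1.6667), tensor(2.5000))
- 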
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the variance and mean over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (var, mean) containing the variance and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var_mean(a, dim=0, keepdim=True)
- (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]:
- r"""
- var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the variance and mean over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (var, mean) containing the variance and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var_mean(a, dim=0, keepdim=True)
- (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the variance and mean over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (var, mean) containing the variance and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var_mean(a, dim=0, keepdim=True)
- (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- @overload
- def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]:
- r"""
- var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
-
- Calculates the variance and mean over the dimensions specified by :attr:`dim`.
- :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
- reduce over all dimensions.
-
- The variance (:math:`\sigma^2`) is calculated as
-
- .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
-
- where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
- sample mean, :math:`N` is the number of samples and :math:`\delta N` is
- the :attr:`correction`.
-
-
-
- If :attr:`keepdim` is ``True``, the output tensor is of the same size
- as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
- Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
- output tensor having 1 (or ``len(dim)``) fewer dimension(s).
-
-
- Args:
- input (Tensor): the input tensor.
-
- dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
- If ``None``, all dimensions are reduced.
-
-
- Keyword args:
- correction (int): difference between the sample size and sample degrees of freedom.
- Defaults to `Bessel's correction`_, ``correction=1``.
-
- .. versionchanged:: 2.0
- Previously this argument was called ``unbiased`` and was a boolean
- with ``True`` corresponding to ``correction=1`` and ``False`` being
- ``correction=0``.
- keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
- out (Tensor, optional): the output tensor.
-
- Returns:
- A tuple (var, mean) containing the variance and mean.
-
- Example:
-
- >>> a = torch.tensor(
- ... [[ 0.2035, 1.2959, 1.8101, -0.4644],
- ... [ 1.5027, -0.3270, 0.5905, 0.6538],
- ... [-1.5745, 1.3330, -0.5596, -0.6548],
- ... [ 0.1264, -0.5080, 1.6420, 0.1992]])
- >>> torch.var_mean(a, dim=0, keepdim=True)
- (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
- tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]]))
-
- .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
- """
- ...
- def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- vdot(input, other, *, out=None) -> Tensor
-
- Computes the dot product of two 1D tensors.
-
- In symbols, this function computes
-
- .. math::
-
- \sum_{i=1}^n \overline{x_i}y_i,
-
- where :math:`\overline{x_i}` denotes the conjugate for complex
- vectors, and it is the identity for real vectors.
-
- .. note::
-
- Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
- of two 1D tensors with the same number of elements.
-
- .. seealso::
-
- :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.
-
- Args:
- input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
- other (Tensor): second tensor in the dot product, must be 1D.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- 
- Example::
-
- >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
- tensor(7)
- >>> a = torch.tensor((1 + 2j, 3 - 1j))
- >>> b = torch.tensor((2 + 1j, 4 - 0j))
- >>> torch.vdot(a, b)
- tensor([16.+1.j])
- >>> torch.vdot(b, a)
- tensor([16.-1.j])
- """
- ...
- def view_as_complex(input: Tensor) -> Tensor:
- r"""
- view_as_complex(input) -> Tensor
-
- Returns a view of :attr:`input` as a complex tensor. For a real input
- tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
- new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi`, where the last
- dimension of the input tensor is expected to represent the real and imaginary
- components of complex numbers.
-
- .. warning::
- :func:`view_as_complex` is only supported for tensors with
- :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
- expected to have the last dimension of :attr:`size` 2. In addition, the
- tensor must have a `stride` of 1 for its last dimension. The strides of all
- other dimensions must be even numbers.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x=torch.randn(4, 2)
- >>> x
- tensor([[ 1.6116, -0.5772],
- [-1.4606, -0.9120],
- [ 0.0786, -1.7497],
- [-0.6561, -1.6623]])
- >>> torch.view_as_complex(x)
- tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
- """
- ...
- def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
- are freshly created instead of aliasing the input.
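- 
- A small illustrative example (assuming a ``(N, 2)`` float input, as for
- :func:`torch.view_as_complex`):
- 
- >>> x = torch.tensor([[1., 2.], [3., 4.]])
- >>> torch.view_as_complex_copy(x)  # fresh tensor, no aliasing of x
- tensor([1.+2.j, 3.+4.j])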
- """
- ...
- def view_as_real(input: Tensor) -> Tensor:
- r"""
- view_as_real(input) -> Tensor
-
- Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
- :attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
- real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
- represents the real and imaginary components of complex numbers.
-
- .. warning::
- :func:`view_as_real` is only supported for tensors with ``complex dtypes``.
-
- Args:
- input (Tensor): the input tensor.
-
- Example::
-
- >>> x=torch.randn(4, dtype=torch.cfloat)
- >>> x
- tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
- >>> torch.view_as_real(x)
- tensor([[ 0.4737, -0.3839],
- [-0.2098, -0.6699],
- [ 0.3470, -0.9451],
- [-0.5174, -1.3136]])
- """
- ...
- def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.view_as_real`, but all output tensors
- are freshly created instead of aliasing the input.
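- 
- A small illustrative example (complex input, as for
- :func:`torch.view_as_real`):
- 
- >>> z = torch.tensor([1.+2.j, 3.+4.j])
- >>> torch.view_as_real_copy(z)  # fresh tensor, no aliasing of z
- tensor([[1., 2.],
- [3., 4.]])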
- """
- ...
- @overload
- def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.view`, but all output tensors
- are freshly created instead of aliasing the input.
- """
- ...
- @overload
- def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- Performs the same operation as :func:`torch.view`, but all output tensors
- are freshly created instead of aliasing the input.
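- 
- A minimal sketch of the size-based overload:
- 
- >>> t = torch.arange(6)
- >>> torch.view_copy(t, (2, 3))  # same data as t.view(2, 3), but copied
- tensor([[0, 1, 2],
- [3, 4, 5]])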
- """
- ...
- @overload
- def vsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]:
- r"""
- vsplit(input, indices_or_sections) -> List of Tensors
-
- Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
- vertically according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
-
- This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
- (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
- it must evenly divide the split dimension or a runtime error will be thrown.
-
- This function is based on NumPy's :func:`numpy.vsplit`.
-
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
-
- Example::
- 
- >>> t = torch.arange(16.0).reshape(4,4)
- >>> t
- tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [12., 13., 14., 15.]])
- >>> torch.vsplit(t, 2)
- (tensor([[0., 1., 2., 3.],
- [4., 5., 6., 7.]]),
- tensor([[ 8., 9., 10., 11.],
- [12., 13., 14., 15.]]))
- >>> torch.vsplit(t, [3, 6])
- (tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.]]),
- tensor([[12., 13., 14., 15.]]),
- tensor([], size=(0, 4)))
- """
- ...
- @overload
- def vsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]:
- r"""
- vsplit(input, indices_or_sections) -> List of Tensors
-
- Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
- vertically according to :attr:`indices_or_sections`. Each split is a view of
- :attr:`input`.
-
- This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0)
- (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
- it must evenly divide the split dimension or a runtime error will be thrown.
-
- This function is based on NumPy's :func:`numpy.vsplit`.
-
- Args:
- input (Tensor): tensor to split.
- indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
-
- Example::
- 
- >>> t = torch.arange(16.0).reshape(4,4)
- >>> t
- tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [12., 13., 14., 15.]])
- >>> torch.vsplit(t, 2)
- (tensor([[0., 1., 2., 3.],
- [4., 5., 6., 7.]]),
- tensor([[ 8., 9., 10., 11.],
- [12., 13., 14., 15.]]))
- >>> torch.vsplit(t, [3, 6])
- (tensor([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.]]),
- tensor([[12., 13., 14., 15.]]),
- tensor([], size=(0, 4)))
- """
- ...
- def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- vstack(tensors, *, out=None) -> Tensor
-
- Stack tensors in sequence vertically (row wise).
-
- This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
-
- Args:
- tensors (sequence of Tensors): sequence of tensors to concatenate
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Example::
-
- >>> a = torch.tensor([1, 2, 3])
- >>> b = torch.tensor([4, 5, 6])
- >>> torch.vstack((a,b))
- tensor([[1, 2, 3],
- [4, 5, 6]])
- >>> a = torch.tensor([[1],[2],[3]])
- >>> b = torch.tensor([[4],[5],[6]])
- >>> torch.vstack((a,b))
- tensor([[1],
- [2],
- [3],
- [4],
- [5],
- [6]])
- """
- ...
- @overload
- def where(condition: Tensor) -> Tuple[Tensor, ...]:
- r"""
- where(condition, input, other, *, out=None) -> Tensor
-
- Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
-
- The operation is defined as:
-
- .. math::
- \text{out}_i = \begin{cases}
- \text{input}_i & \text{if } \text{condition}_i \\
- \text{other}_i & \text{otherwise} \\
- \end{cases}
-
- .. note::
- The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Arguments:
- condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
- input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
- where :attr:`condition` is ``True``
- other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
- where :attr:`condition` is ``False``
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
-
- Example::
-
- >>> x = torch.randn(3, 2)
- >>> y = torch.ones(3, 2)
- >>> x
- tensor([[-0.4620, 0.3139],
- [ 0.3898, -0.7197],
- [ 0.0478, -0.1657]])
- >>> torch.where(x > 0, 1.0, 0.0)
- tensor([[0., 1.],
- [1., 0.],
- [1., 0.]])
- >>> torch.where(x > 0, x, y)
- tensor([[ 1.0000, 0.3139],
- [ 0.3898, 1.0000],
- [ 0.0478, 1.0000]])
- >>> x = torch.randn(2, 2, dtype=torch.double)
- >>> x
- tensor([[ 1.0779, 0.0383],
- [-0.8785, -1.1089]], dtype=torch.float64)
- >>> torch.where(x > 0, x, 0.)
- tensor([[1.0779, 0.0383],
- [0.0000, 0.0000]], dtype=torch.float64)
-
- .. function:: where(condition) -> tuple of LongTensor
- :noindex:
-
- ``torch.where(condition)`` is identical to
- ``torch.nonzero(condition, as_tuple=True)``.
-
- .. note::
- See also :func:`torch.nonzero`.
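- 
- A quick illustration of the single-argument form:
- 
- >>> torch.where(torch.tensor([[True, False], [False, True]]))
- (tensor([0, 1]), tensor([0, 1]))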
- """
- ...
- @overload
- def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- where(condition, input, other, *, out=None) -> Tensor
-
- Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
-
- The operation is defined as:
-
- .. math::
- \text{out}_i = \begin{cases}
- \text{input}_i & \text{if } \text{condition}_i \\
- \text{other}_i & \text{otherwise} \\
- \end{cases}
-
- .. note::
- The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Arguments:
- condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
- input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
- where :attr:`condition` is ``True``
- other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
- where :attr:`condition` is ``False``
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
-
- Example::
-
- >>> x = torch.randn(3, 2)
- >>> y = torch.ones(3, 2)
- >>> x
- tensor([[-0.4620, 0.3139],
- [ 0.3898, -0.7197],
- [ 0.0478, -0.1657]])
- >>> torch.where(x > 0, 1.0, 0.0)
- tensor([[0., 1.],
- [1., 0.],
- [1., 0.]])
- >>> torch.where(x > 0, x, y)
- tensor([[ 1.0000, 0.3139],
- [ 0.3898, 1.0000],
- [ 0.0478, 1.0000]])
- >>> x = torch.randn(2, 2, dtype=torch.double)
- >>> x
- tensor([[ 1.0779, 0.0383],
- [-0.8785, -1.1089]], dtype=torch.float64)
- >>> torch.where(x > 0, x, 0.)
- tensor([[1.0779, 0.0383],
- [0.0000, 0.0000]], dtype=torch.float64)
-
- .. function:: where(condition) -> tuple of LongTensor
- :noindex:
-
- ``torch.where(condition)`` is identical to
- ``torch.nonzero(condition, as_tuple=True)``.
-
- .. note::
- See also :func:`torch.nonzero`.
- """
- ...
- @overload
- def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor:
- r"""
- where(condition, input, other, *, out=None) -> Tensor
-
- Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
-
- The operation is defined as:
-
- .. math::
- \text{out}_i = \begin{cases}
- \text{input}_i & \text{if } \text{condition}_i \\
- \text{other}_i & \text{otherwise} \\
- \end{cases}
-
- .. note::
- The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Arguments:
- condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
- input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
- where :attr:`condition` is ``True``
- other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
- where :attr:`condition` is ``False``
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
-
- Example::
-
- >>> x = torch.randn(3, 2)
- >>> y = torch.ones(3, 2)
- >>> x
- tensor([[-0.4620, 0.3139],
- [ 0.3898, -0.7197],
- [ 0.0478, -0.1657]])
- >>> torch.where(x > 0, 1.0, 0.0)
- tensor([[0., 1.],
- [1., 0.],
- [1., 0.]])
- >>> torch.where(x > 0, x, y)
- tensor([[ 1.0000, 0.3139],
- [ 0.3898, 1.0000],
- [ 0.0478, 1.0000]])
- >>> x = torch.randn(2, 2, dtype=torch.double)
- >>> x
- tensor([[ 1.0779, 0.0383],
- [-0.8785, -1.1089]], dtype=torch.float64)
- >>> torch.where(x > 0, x, 0.)
- tensor([[1.0779, 0.0383],
- [0.0000, 0.0000]], dtype=torch.float64)
-
- .. function:: where(condition) -> tuple of LongTensor
- :noindex:
-
- ``torch.where(condition)`` is identical to
- ``torch.nonzero(condition, as_tuple=True)``.
-
- .. note::
- See also :func:`torch.nonzero`.
- """
- ...
- @overload
- def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor:
- r"""
- where(condition, input, other, *, out=None) -> Tensor
-
- Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
-
- The operation is defined as:
-
- .. math::
- \text{out}_i = \begin{cases}
- \text{input}_i & \text{if } \text{condition}_i \\
- \text{other}_i & \text{otherwise} \\
- \end{cases}
-
- .. note::
- The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Arguments:
- condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
- input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
- where :attr:`condition` is ``True``
- other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
- where :attr:`condition` is ``False``
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
-
- Example::
-
- >>> x = torch.randn(3, 2)
- >>> y = torch.ones(3, 2)
- >>> x
- tensor([[-0.4620, 0.3139],
- [ 0.3898, -0.7197],
- [ 0.0478, -0.1657]])
- >>> torch.where(x > 0, 1.0, 0.0)
- tensor([[0., 1.],
- [1., 0.],
- [1., 0.]])
- >>> torch.where(x > 0, x, y)
- tensor([[ 1.0000, 0.3139],
- [ 0.3898, 1.0000],
- [ 0.0478, 1.0000]])
- >>> x = torch.randn(2, 2, dtype=torch.double)
- >>> x
- tensor([[ 1.0779, 0.0383],
- [-0.8785, -1.1089]], dtype=torch.float64)
- >>> torch.where(x > 0, x, 0.)
- tensor([[1.0779, 0.0383],
- [0.0000, 0.0000]], dtype=torch.float64)
-
- .. function:: where(condition) -> tuple of LongTensor
- :noindex:
-
- ``torch.where(condition)`` is identical to
- ``torch.nonzero(condition, as_tuple=True)``.
-
- .. note::
- See also :func:`torch.nonzero`.
- """
- ...
- @overload
- def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor:
- r"""
- where(condition, input, other, *, out=None) -> Tensor
-
- Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
-
- The operation is defined as:
-
- .. math::
- \text{out}_i = \begin{cases}
- \text{input}_i & \text{if } \text{condition}_i \\
- \text{other}_i & \text{otherwise} \\
- \end{cases}
-
- .. note::
- The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
-
- Arguments:
- condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
- input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
- where :attr:`condition` is ``True``
- other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
- where :attr:`condition` is ``False``
-
- Keyword args:
- out (Tensor, optional): the output tensor.
-
- Returns:
- Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
-
- Example::
-
- >>> x = torch.randn(3, 2)
- >>> y = torch.ones(3, 2)
- >>> x
- tensor([[-0.4620, 0.3139],
- [ 0.3898, -0.7197],
- [ 0.0478, -0.1657]])
- >>> torch.where(x > 0, 1.0, 0.0)
- tensor([[0., 1.],
- [1., 0.],
- [1., 0.]])
- >>> torch.where(x > 0, x, y)
- tensor([[ 1.0000, 0.3139],
- [ 0.3898, 1.0000],
- [ 0.0478, 1.0000]])
- >>> x = torch.randn(2, 2, dtype=torch.double)
- >>> x
- tensor([[ 1.0779, 0.0383],
- [-0.8785, -1.1089]], dtype=torch.float64)
- >>> torch.where(x > 0, x, 0.)
- tensor([[1.0779, 0.0383],
- [0.0000, 0.0000]], dtype=torch.float64)
-
- .. function:: where(condition) -> tuple of LongTensor
- :noindex:
-
- ``torch.where(condition)`` is identical to
- ``torch.nonzero(condition, as_tuple=True)``.
-
- .. note::
- See also :func:`torch.nonzero`.
- """
- ...
- @overload
- def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- xlogy(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.xlogy`.
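- 
- ``torch.special.xlogy`` computes ``input * log(other)``, returning zero
- where ``input == 0`` (a small illustration):
- 
- >>> x = torch.tensor([0., 1., 2.])
- >>> y = torch.tensor([0., 2., 4.])
- >>> torch.xlogy(x, y)
- tensor([0.0000, 0.6931, 2.7726])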
- """
- ...
- @overload
- def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- xlogy(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.xlogy`.
- """
- ...
- @overload
- def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor:
- r"""
- xlogy(input, other, *, out=None) -> Tensor
-
- Alias for :func:`torch.special.xlogy`.
- """
- ...
- @overload
- def xlogy_(input: Tensor, other: Tensor) -> Tensor: ...
- @overload
- def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ...
- def zero_(input: Tensor) -> Tensor: ...
- @overload
- def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `0`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.zeros(2, 3)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
-
- >>> torch.zeros(5)
- tensor([ 0., 0., 0., 0., 0.])
- """
- ...
- @overload
- def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `0`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.zeros(2, 3)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
-
- >>> torch.zeros(5)
- tensor([ 0., 0., 0., 0., 0.])
- """
- ...
- @overload
- def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `0`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.zeros(2, 3)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
-
- >>> torch.zeros(5)
- tensor([ 0., 0., 0., 0., 0.])
- """
- ...
- @overload
- def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
-
- Returns a tensor filled with the scalar value `0`, with the shape defined
- by the variable argument :attr:`size`.
-
- Args:
- size (int...): a sequence of integers defining the shape of the output tensor.
- Can be a variable number of arguments or a collection like a list or tuple.
-
- Keyword args:
- out (Tensor, optional): the output tensor.
- dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
- Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
- layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
- Default: ``torch.strided``.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, uses the current device for the default tensor type
- (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
- for CPU tensor types and the current CUDA device for CUDA tensor types.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
-
- Example::
-
- >>> torch.zeros(2, 3)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
-
- >>> torch.zeros(5)
- tensor([ 0., 0., 0., 0., 0.])
- """
- ...
- def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
- r"""
- zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
-
- Returns a tensor filled with the scalar value `0`, with the same size as
- :attr:`input`. ``torch.zeros_like(input)`` is equivalent to
- ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- .. warning::
- As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
- the old ``torch.zeros_like(input, out=output)`` is equivalent to
- ``torch.zeros(input.size(), out=output)``.
-
- Args:
- input (Tensor): the size of :attr:`input` will determine size of the output tensor.
-
- Keyword args:
- dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
- Default: if ``None``, defaults to the dtype of :attr:`input`.
- layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
- Default: if ``None``, defaults to the layout of :attr:`input`.
- device (:class:`torch.device`, optional): the desired device of returned tensor.
- Default: if ``None``, defaults to the device of :attr:`input`.
- requires_grad (bool, optional): If autograd should record operations on the
- returned tensor. Default: ``False``.
- memory_format (:class:`torch.memory_format`, optional): the desired memory format of
- returned Tensor. Default: ``torch.preserve_format``.
-
- Example::
-
- >>> input = torch.empty(2, 3)
- >>> torch.zeros_like(input)
- tensor([[ 0., 0., 0.],
- [ 0., 0., 0.]])
- """
- ...
- __all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d',
- '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation',
- '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async',
- '_assert_scalar', '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char',
- '_cast_Double', '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short',
- '_choose_qparams_per_tensor', '_chunk_cat', '_coalesce', '_compute_linear_combination', '_conj',
- '_conj_copy', '_conj_physical', '_convert_indices_from_coo_to_csr',
- '_convert_indices_from_csr_to_coo', '_convert_weight_to_int4pack', '_convolution',
- '_convolution_mode', '_copy_from', '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm',
- '_cslt_sparse_mm_search', '_ctc_loss', '_cudnn_ctc_loss', '_cudnn_init_dropout_state',
- '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache',
- '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size',
- '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange',
- '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag',
- '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized',
- '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine',
- '_fake_quantize_learnable_per_tensor_affine',
- '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams',
- '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c',
- '_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos',
- '_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_',
- '_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan',
- '_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_',
- '_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_',
- '_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf',
- '_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_',
- '_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac',
- '_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_',
- '_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_',
- '_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_max', '_foreach_maximum',
- '_foreach_maximum_', '_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_',
- '_foreach_neg', '_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_',
- '_foreach_reciprocal', '_foreach_reciprocal_', '_foreach_round', '_foreach_round_',
- '_foreach_sigmoid', '_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin',
- '_foreach_sin_', '_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_',
- '_foreach_sub', '_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh',
- '_foreach_tanh_', '_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor',
- '_functional_assert_async', '_functional_assert_scalar', '_functional_sym_constrain_range',
- '_functional_sym_constrain_range_for_size', '_functionalize_apply_view_metas',
- '_functionalize_are_all_mutations_hidden_from_autograd',
- '_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update',
- '_functionalize_has_metadata_mutation', '_functionalize_is_symbolic',
- '_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace',
- '_functionalize_sync', '_functionalize_was_inductor_storage_resized',
- '_functionalize_was_storage_changed', '_fused_adagrad_', '_fused_adam_', '_fused_adamw_',
- '_fused_dropout', '_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper',
- '_fused_sdp_choice', '_fused_sgd_', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback',
- '_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts',
- '_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true',
- '_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_lazy_clone', '_linalg_check_errors',
- '_linalg_det', '_linalg_det', '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet',
- '_linalg_solve_ex', '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax',
- '_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info',
- '_make_dep_token', '_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor',
- '_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear',
- '_mkldnn_reshape', '_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution',
- '_mps_convolution_transpose', '_native_batch_norm_legit', '_native_batch_norm_legit_no_training',
- '_native_multi_head_attention', '_neg_view', '_neg_view_copy',
- '_nested_compute_contiguous_strides_offsets', '_nested_from_padded',
- '_nested_from_padded_and_nested_example', '_nested_get_jagged_dummy', '_nested_get_lengths',
- '_nested_get_offsets', '_nested_get_ragged_idx', '_nested_get_values', '_nested_get_values_copy',
- '_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned',
- '_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer',
- '_nested_view_from_buffer_copy', '_nested_view_from_jagged', '_nested_view_from_jagged_copy',
- '_nnpack_available', '_nnpack_spatial_convolution', '_pack_padded_sequence',
- '_pad_packed_sequence', '_pin_memory', '_prelu_kernel', '_print', '_propagate_xla_data',
- '_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_',
- '_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16',
- '_scaled_dot_product_attention_math', '_scaled_dot_product_cudnn_attention',
- '_scaled_dot_product_cudnn_attention', '_scaled_dot_product_efficient_attention',
- '_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention',
- '_scaled_dot_product_flash_attention', '_scaled_dot_product_flash_attention_for_cpu',
- '_scaled_dot_product_flash_attention_for_cpu', '_scaled_mm', '_shape_as_tensor',
- '_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_',
- '_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to',
- '_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum',
- '_sparse_log_softmax_backward_data', '_sparse_semi_structured_addmm',
- '_sparse_semi_structured_apply', '_sparse_semi_structured_apply_dense',
- '_sparse_semi_structured_linear', '_sparse_semi_structured_mm', '_sparse_semi_structured_tile',
- '_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack',
- '_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch',
- '_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy',
- '_test_check_tensor', '_test_functorch_fallback', '_test_parallel_materialize',
- '_test_serialization_subcmul', '_to_cpu', '_to_functional_tensor', '_to_sparse_semi_structured',
- '_transform_bias_rescale_qkv', '_transformer_encoder_layer_fwd', '_trilinear',
- '_triton_multi_head_attention', '_triton_scaled_dot_attention', '_unique', '_unique2',
- '_unpack_dual', '_unpack_dual', '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss',
- '_use_cudnn_rnn_flatten_weight', '_validate_compressed_sparse_indices',
- '_validate_sparse_bsc_tensor_args', '_validate_sparse_bsr_tensor_args',
- '_validate_sparse_compressed_tensor_args', '_validate_sparse_coo_tensor_args',
- '_validate_sparse_csc_tensor_args', '_validate_sparse_csr_tensor_args', '_values_copy',
- '_weight_int4pack_mm', '_weight_int8pack_mm', '_weight_norm', '_weight_norm_interface', 'abs',
- 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d',
- 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr',
- 'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout',
- 'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange', 'arccos',
- 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2',
- 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided',
- 'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_',
- 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm',
- 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce',
- 'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts',
- 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear',
- 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift',
- 'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm',
- 'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu',
- 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve',
- 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min',
- 'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations',
- 'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd',
- 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d',
- 'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_',
- 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy',
- 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution',
- 'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose',
- 'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', 'cummin', 'cummin', 'cumprod',
- 'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_',
- 'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter',
- 'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit',
- 'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like',
- 'empty_permuted', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc',
- 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye',
- 'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
- 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
- 'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight',
- 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout',
- 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_',
- 'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax',
- 'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy',
- 'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_',
- 'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads',
- 'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d',
- 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside',
- 'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm',
- 'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add',
- 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select',
- 'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex',
- 'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference',
- 'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed',
- 'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf',
- 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm',
- 'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log',
- 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp',
- 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
- 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack',
- 'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
- 'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
- 'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm',
- 'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu',
- 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn',
- 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights',
- 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis',
- 'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num',
- 'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow',
- 'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout',
- 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative',
- 'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal',
- 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance',
- 'pdist', 'permute', 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson',
- 'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types',
- 'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale',
- 'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 'quantize_per_tensor',
- 'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell',
- 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell',
- 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like',
- 'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu',
- 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_',
- 'resolve_conj', 'resolve_neg', 'result_type', 'rms_norm', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh',
- 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu',
- 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add',
- 'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter',
- 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn',
- 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_',
- 'slice_copy', 'slice_inverse', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort',
- 'sort', 'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor',
- 'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes',
- 'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy',
- 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes',
- 'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take',
- 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold',
- 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz',
- 'triangular_solve', 'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu',
- 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten',
- 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes',
- 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot',
- 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy',
- 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like']
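Since a name list like this tends to drift between releases, it can be useful to check the entries against the installed build. Below is a minimal sketch, assuming the entries above name attributes of the top-level `torch` namespace; the sampled names and the `names`/`missing` variables are illustrative, not part of the original listing:

```python
import torch

# A few entries sampled from the list above; extend with the full list as needed.
names = ['bincount', 'bmm', 'cummax', 'rms_norm', 'slogdet', 'zeros_like']

# Report which names are absent from the installed torch build,
# e.g. because they were added or removed in a different release.
missing = [n for n in names if not hasattr(torch, n)]
print('missing from this build:', missing or 'none')
```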