- # mypy: allow-untyped-defs
- import atexit
- import collections
- import contextlib
- import copy
- import dataclasses
- import datetime
- import dis
- import enum
- import functools
- import gc
- import inspect
- import itertools
- import linecache
- import logging
- import math
- import operator
- import os
- import re
- import sys
- import textwrap
- import threading
- import time
- import types
- import typing
- import warnings
- import weakref
- from contextlib import contextmanager
- from functools import lru_cache, wraps
- from types import MethodWrapperType
- from typing import (
- Any,
- Callable,
- cast,
- ClassVar,
- Counter,
- DefaultDict,
- Deque,
- Dict,
- Iterator,
- KeysView,
- List,
- Optional,
- Set,
- Tuple,
- Type,
- Union,
- ValuesView,
- )
- from ..utils.hooks import RemovableHandle
- try:
- import numpy as np
- except ModuleNotFoundError:
- np = None # type: ignore[assignment]
- try:
- import torch._logging
- import torch._numpy as tnp
- from torch._guards import detect_fake_mode # noqa: F401
- from torch._logging import LazyString
- from . import config
- # NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync.
- if np:
- NP_SUPPORTED_MODULES: Tuple[types.ModuleType, ...] = (
- np,
- np.fft,
- np.linalg,
- np.random,
- )
- NP_TO_TNP_MODULE = {
- np: tnp,
- np.fft: tnp.fft,
- np.linalg: tnp.linalg,
- np.random: tnp.random,
- }
- else:
- NP_SUPPORTED_MODULES = tuple()
- NP_TO_TNP_MODULE = {}
- from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode
- except ImportError:
- pass
- import importlib
- import torch
- import torch._functorch.config
- import torch.fx.experimental.symbolic_shapes
- import torch.utils._pytree as pytree
- from torch import fx
- from torch._dispatch.python import enable_python_dispatcher
- from torch._guards import TracingContext
- from torch._subclasses.meta_utils import is_sparse_compressed
- from torch._utils_internal import log_compilation_event
- from torch.fx._utils import _format_graph_code, lazy_format_graph_code
- from torch.nn.modules.lazy import LazyModuleMixin
- from torch.utils._triton import has_triton, has_triton_package
- counters: DefaultDict[str, Counter[str]] = collections.defaultdict(collections.Counter)
- optimus_scuba_log: Dict[str, Any] = {}
- troubleshooting_url = (
- "https://pytorch.org/docs/main/torch.compiler_troubleshooting.html"
- )
- nnmodule_doc_url = "https://pytorch.org/docs/main/torch.compiler_nn_module.html"
- nnmodule_doc_url_msg = f"See {nnmodule_doc_url} for more information and limitations."
- log = logging.getLogger(__name__)
- # profiling compilation time by function
- compilation_time_metrics: Dict[str, List[float]] = {}
- # profiling compilation time by frame phase
- frame_phase_timing: Dict[str, Dict[str, float]] = collections.defaultdict(
- lambda: collections.defaultdict(float)
- )
- timer_counter = itertools.count()
- def tabulate(rows, headers):
- try:
- import tabulate
- return tabulate.tabulate(rows, headers=headers)
- except ImportError:
- return "\n".join(
- ", ".join(map(str, row)) for row in itertools.chain([headers], rows)
- )
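- # Illustrative sketch (assumed usage): when the `tabulate` package is unavailable,
- # the fallback above degrades to comma-separated rows, e.g.
- #   tabulate([["foo", 1.0]], headers=("Function", "Runtimes (s)"))
- # returns "Function, Runtimes (s)\nfoo, 1.0" instead of an aligned table.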
- curr_frame = 0
- # Note: Called for you by dynamo - you almost never want to invoke this yourself.
- def increment_frame():
- global curr_frame
- curr_frame = curr_frame + 1
- # Note: Called for you by dynamo - you almost never want to invoke this yourself.
- def reset_frame_count():
- global curr_frame
- frame_phase_timing.clear()
- compilation_time_metrics.clear()
- curr_frame = 0
- op_count = 0
- def increment_op_count(cnt):
- global op_count
- op_count += cnt
- # Calculate total time spent so far for each phase
- # For example, {'entire_frame_compile':8.574629999999999, 'backend_compile':5.26806}
- def calculate_time_spent():
- total = 0.0
- total_by_key = {}
- for timings in frame_phase_timing.values():
- for key, timing in timings.items():
- total += timing
- if key not in total_by_key:
- total_by_key[key] = timing
- else:
- total_by_key[key] += timing
- return total_by_key
- # Print a report of time spent so far
- # Ex:
- # TIMING:
- # entire_frame_compile:8.574629999999999
- # backend_compile:5.26806
- def print_time_report():
- total_by_key = calculate_time_spent()
- out = "TIMING:"
- for key, value in total_by_key.items():
- out = f"{out} {key}:{round(value, 5)}"
- print(out)
- def _add_time_spent(key, phase_name, time_spent):
- frame_phase_timing[key][phase_name] += time_spent
- # dynamo_timed API works as a function decorator
- # By wrapping a function in dynamo_timed, we can store a record in compilation_time_metrics
- # where the key is the function's name.
- # For example:
- #
- # @dynamo_timed
- # def _foo(...):
- #
- # Would show up as an entry in our timing dict:
- # OrderedDict([('bar.<locals>._foo', [0.083690, 0.23949, 3.1425e-05])])
- # This is extremely useful for granular debugging.
- #
- # For a higher-level mode, pass a phase_name into dynamo_timed.
- # A phase_name adds an extra record to a separate compilation timing structure,
- # one keyed on frame+phase_name rather than function.
- # The frame is incremented outside of this function, in def increment_frame() above.
- # `fwd_only` identifies whether this phase or function is only called
- # when compiling forward graphs, e.g., `entire_frame_compile` and `backend_compile`.
- # The other phases (`inductor_compile` and `code_gen`) are called for both forward and backward graphs.
- def dynamo_timed(original_function=None, phase_name=None, fwd_only=True):
- def dynamo_timed_inner(func):
- @wraps(func)
- def time_wrapper(*args, **kwargs):
- key = func.__qualname__
- if key not in compilation_time_metrics:
- compilation_time_metrics[key] = []
- fail_type: Optional[str] = None
- fail_reason: Optional[str] = None
- time_spent = float("-inf")
- try:
- with torch.profiler.record_function(f"{key} (dynamo_timed)"):
- t0 = time.time()
- r = func(*args, **kwargs)
- time_spent = time.time() - t0
- compilation_time_metrics[key].append(time_spent)
- except Exception as e:
- fail_type = str(type(e))
- fail_reason = str(e)
- raise
- finally:
- # Only record backward compilation metrics if phase_name is not None!
- if phase_name:
- frame_key = str(curr_frame)
- # fwd only compilation stages: entire_frame_compile, backend_compile.
- # use frame_key as time aggregation key.
- if fwd_only and fail_type is None:
- _add_time_spent(frame_key, phase_name, time_spent)
- else:
- # fwd + bwd compilation stages: inductor_compile, code_gen.
- # use frame_key as time aggregation key for fwd graphs;
- # use compile_id as time aggregation key for bwd graphs.
- if torch._guards.TracingContext.try_get() is not None:
- aot_graph_name = str(
- torch._guards.TracingContext.get().aot_graph_name
- )
- if (
- "forward" in aot_graph_name
- or "inference" in aot_graph_name
- ) and fail_type is None:
- _add_time_spent(frame_key, phase_name, time_spent)
- elif "backward" in aot_graph_name:
- compile_id = str(
- torch._guards.CompileContext.current_compile_id()
- )
- if fail_type is None:
- _add_time_spent(compile_id, phase_name, time_spent)
- # log backward compilation metrics at the end of `inductor_compile` of bwd graph,
- # one record for one bwd graph.
- if phase_name == "inductor_compile":
- if fail_type is None:
- inductor_compile_time = frame_phase_timing[
- compile_id
- ].get("inductor_compile", None)
- code_gen_time = frame_phase_timing[
- compile_id
- ].get("code_gen", None)
- else:
- inductor_compile_time = None
- code_gen_time = None
- metrics = BwdCompilationMetrics(
- compile_id,
- inductor_compile_time,
- code_gen_time,
- fail_type,
- fail_reason,
- )
- record_compilation_metrics(metrics)
- return r
- return time_wrapper
- if original_function:
- return dynamo_timed_inner(original_function)
- return dynamo_timed_inner
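- # Usage sketch (assumed, mirroring the comment block above): the decorator can be
- # applied bare or with a phase_name, e.g.
- #
- #   @dynamo_timed
- #   def _foo(...): ...                              # timed under its __qualname__
- #
- #   @dynamo_timed(phase_name="entire_frame_compile")
- #   def compile_frame(...): ...                     # also aggregated per frame/phase
- #
- # where `_foo` and `compile_frame` are hypothetical functions.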
- def compile_times(repr="str", aggregate=False):
- """
- Get metrics about torchdynamo frontend/backend compilation times.
- Accumulates information from functions tagged with `@dynamo_timed`.
- repr='str' returns a printable string for user interaction, and 'csv'
- returns (headers, rows) which can be logged for output.
- aggregate causes values from multiple compilations (e.g. split graphs)
- to be accumulated into one value. If False, expect more than one value
- per metric.
- """
- def fmt_fn(values, item_fn=lambda x: x):
- if aggregate:
- return item_fn(sum(values))
- return ", ".join(map(item_fn, values))
- if repr == "str":
- rows = [
- (k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f"{x:.4f}"))
- for k in compilation_time_metrics
- ]
- out = "TorchDynamo compilation metrics:\n"
- out += tabulate(rows, headers=("Function", "Runtimes (s)"))
- return out
- elif repr == "csv":
- values = [
- fmt_fn(v, item_fn=lambda x: f"{x:.6f}")
- for v in compilation_time_metrics.values()
- ]
- headers = list(compilation_time_metrics.keys())
- return headers, values
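- # Example sketch (assumed): after some compilations have been recorded,
- #   print(compile_times(repr="str", aggregate=True))
- # prints one aggregated runtime per timed function, while
- #   headers, values = compile_times(repr="csv", aggregate=False)
- # returns parallel lists suitable for CSV-style logging.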
- @atexit.register
- def dump_compile_times():
- log.info(compile_times(repr="str", aggregate=True))
- tensortype_to_dtype = {
- torch.FloatTensor: (torch.float32, torch.float),
- torch.DoubleTensor: (torch.float64, torch.double),
- torch.HalfTensor: (torch.float16, torch.half),
- torch.BFloat16Tensor: (torch.bfloat16,),
- torch.ByteTensor: (torch.uint8,),
- torch.CharTensor: (torch.int8,),
- torch.LongTensor: (torch.int64, torch.long),
- torch.IntTensor: (torch.int32, torch.int),
- torch.ShortTensor: (torch.int16, torch.short),
- torch.BoolTensor: (torch.bool,),
- }
- class DuplicateWarningChecker:
- def __init__(self, maxsize=4096):
- self.maxsize = maxsize
- self.reset()
- def reset(self):
- self.set = collections.OrderedDict()
- def add(self, key):
- if key in self.set:
- self.set.move_to_end(key, last=True)
- if not config.verbose:
- return False
- else:
- self.set[key] = None
- while len(self.set) > self.maxsize:
- self.set.popitem(last=False)
- return True
- graph_break_dup_warning_checker = DuplicateWarningChecker()
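- # Usage sketch (assumed): `add` returns True the first time a key is seen (and on
- # every repeat only when config.verbose), so callers can gate duplicate warnings:
- #   if graph_break_dup_warning_checker.add((filename, lineno, reason)):
- #       log.warning("Graph break: %s", reason)
- # where the key tuple shown here is hypothetical.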
- def setup_compile_debug():
- compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
- if compile_debug:
- return add_file_handler()
- return contextlib.ExitStack()
- def reset_graph_break_dup_checker():
- graph_break_dup_warning_checker.reset()
- def add_file_handler():
- log_path = os.path.join(get_debug_dir(), "torchdynamo")
- os.makedirs(log_path, exist_ok=True)
- log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log"))
- logger = logging.getLogger("torch._dynamo")
- logger.addHandler(log_file_handler)
- exitstack = contextlib.ExitStack()
- exitstack.callback(lambda: logger.removeHandler(log_file_handler))
- return exitstack
- def setup_log_file():
- exitstack = contextlib.ExitStack()
- if config.log_file_name is not None:
- log_file_handler = logging.FileHandler(config.log_file_name)
- for logger in torch._logging._internal.get_loggers():
- logger.addHandler(log_file_handler)
- exitstack.callback(lambda: logger.removeHandler(log_file_handler))
- return exitstack
- return exitstack
- def gen_record_file_name(exc, code):
- return f"{get_debug_dir()}/error_recordings/\
- {code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec"
- def write_record_to_file(filename, exec_record):
- try:
- if os.path.exists(filename):
- log.warning(
- "Unable to write execution record %s; file already exists.", filename
- )
- else:
- os.makedirs(os.path.dirname(filename), exist_ok=True)
- with open(filename, "wb") as f:
- exec_record.dump(f)
- except Exception:
- log.exception("Unable to write execution record %s", filename)
- def count_calls(g: fx.Graph):
- c = 0
- for n in g.nodes:
- if "call" in n.op:
- c += 1
- return c
- def identity(x):
- return x
- def hashable(x):
- try:
- hash(x)
- return True
- except TypeError:
- return False
- # cannot hash writable memoryview object
- except ValueError:
- return False
- def nothing(*args, **kwargs):
- pass
- class ExactWeakKeyDictionary:
- """Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality"""
- def __init__(self):
- self.values = dict()
- self.refs = dict()
- def __getitem__(self, key):
- return self.values[id(key)]
- def get(self, key, default=None):
- return self.values.get(id(key), default)
- def __contains__(self, key):
- return id(key) in self.values
- def __setitem__(self, key, value):
- idx = id(key)
- if idx not in self.refs:
- self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx))
- self.values[idx] = value
- def _remove_id(self, idx):
- if idx in self.values:
- del self.values[idx]
- if idx in self.refs:
- del self.refs[idx]
- def clear(self):
- self.refs.clear()
- self.values.clear()
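- # Usage sketch (assumed): keys are matched by identity rather than __eq__/__hash__,
- # which matters for objects (like Tensors) whose __eq__ is not a plain bool:
- #   d = ExactWeakKeyDictionary()
- #   t = torch.zeros(3)
- #   d[t] = "meta"
- #   assert t in d and d[t] == "meta"   # entry disappears once `t` is garbage collected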
- def istype(obj, allowed_types):
- """isinstance() without subclasses"""
- if isinstance(allowed_types, (tuple, list, set)):
- return type(obj) in allowed_types
- return type(obj) is allowed_types
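- # Example (illustrative): istype(True, int) is False because type(True) is bool,
- # whereas isinstance(True, int) is True; istype is used where subclasses must not
- # be silently accepted.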
- if sys.version_info >= (3, 12):
- # Some typing classes moved to C in 3.12,
- # which no longer have the _Final mixin.
- _builtin_final_typing_classes = (
- typing.ParamSpecArgs,
- typing.ParamSpecKwargs,
- typing.ParamSpec,
- typing.TypeVar,
- typing.TypeVarTuple,
- typing.TypeAliasType,
- )
- def is_typing(value):
- # _Final catches most of typing classes:
- # - Any
- # - Callable
- # - Union
- # ...
- #
- # NB: we intentionally ignore classes that inherit from Generic, since they
- # can be used both as TypingVariable and as UserDefinedClassVariable.
- if sys.version_info >= (3, 12) and isinstance(value, _builtin_final_typing_classes):
- return True
- return isinstance(value, typing._Final) or value is typing.Generic # type: ignore[attr-defined]
- def is_numpy_int_type(value):
- if not np:
- return False
- return istype(
- value,
- (
- np.int8,
- np.int16,
- np.int32,
- np.int64,
- np.uint8,
- np.uint16,
- np.uint32,
- np.uint64,
- ),
- )
- def is_numpy_float_type(value):
- if not np:
- return False
- return istype(
- value,
- (
- np.float16,
- np.float32,
- np.float64,
- ),
- )
- def is_function_or_wrapper(value):
- return (
- is_function(value)
- or isinstance(value, functools._lru_cache_wrapper)
- and is_function(inspect.getattr_static(value, "__wrapped__"))
- or isinstance(value, (torch._ops.OpOverloadPacket, torch._ops.OpOverload))
- )
- def is_function(value):
- return isinstance(
- value,
- (
- types.FunctionType,
- types.BuiltinFunctionType,
- types.MethodDescriptorType,
- types.WrapperDescriptorType,
- torch.jit.ScriptFunction,
- ),
- )
- def unwrap_if_wrapper(fn):
- return unwrap_with_attr_name_if_wrapper(fn)[0]
- def unwrap_with_attr_name_if_wrapper(fn):
- # unpack @functools.lru_cache wrapped function
- if isinstance(fn, functools._lru_cache_wrapper):
- fn = inspect.getattr_static(fn, "__wrapped__")
- attr_name = "__wrapped__"
- # unpack @torch._dynamo.optimize()(fn) wrapped function
- elif is_function(fn) and inspect.getattr_static(fn, "_torchdynamo_inline", False):
- fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn)
- attr_name = "_torchdynamo_inline"
- # unpack torch.jit.script_if_tracing
- elif is_function(fn) and inspect.getattr_static(
- fn, "__script_if_tracing_wrapper", False
- ):
- fn = inspect.getattr_static(fn, "__original_fn", fn)
- attr_name = "__original_fn"
- else:
- attr_name = None
- return fn, attr_name
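- # Example sketch (assumed): for a functools.lru_cache-wrapped function,
- #   cached = functools.lru_cache(None)(some_fn)
- #   unwrap_with_attr_name_if_wrapper(cached) == (some_fn, "__wrapped__")
- # while a plain, unwrapped function is returned unchanged with attr_name None.
- # (`some_fn` is a placeholder.)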
- def is_numpy_ndarray(value):
- if not np:
- return False
- return istype(value, np.ndarray)
- def istensor(obj):
- """Check of obj is a tensor"""
- tensor_list = (
- torch.Tensor,
- torch.nn.Parameter,
- *config.traceable_tensor_subclasses,
- )
- tensor_list = tensor_list + (torch._subclasses.FakeTensor,)
- return istype(obj, tensor_list)
- def is_lazy_module(mod):
- return isinstance(mod, LazyModuleMixin)
- @functools.lru_cache(4096)
- def print_once(*args):
- print(*args)
- def make_cell(val=None):
- """Some black magic to create a cell object that usually only exists in a closure"""
- x = val
- def f():
- return x
- assert f.__closure__ is not None and len(f.__closure__) == 1
- return f.__closure__[0]
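- # Example (illustrative): make_cell(10).cell_contents == 10, i.e. it produces the
- # same kind of cell object a closure over `val` would capture.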
- def proxy_args_kwargs(args, kwargs):
- try:
- proxy_args = tuple(arg.as_proxy() for arg in args)
- proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()}
- return proxy_args, proxy_kwargs
- except NotImplementedError as e:
- from .exc import unimplemented
- from .variables.base import typestr
- unimplemented(
- f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}",
- from_exc=e,
- )
- @dataclasses.dataclass
- class CompilationMetrics:
- compile_id: str
- frame_key: str
- co_name: str
- co_filename: str
- co_firstlineno: int
- cache_size: int
- accumulated_cache_size: int
- guard_count: Optional[int]
- shape_env_guard_count: Optional[int]
- graph_op_count: Optional[int]
- graph_node_count: Optional[int]
- graph_input_count: Optional[int]
- start_time: float
- entire_frame_compile_time_s: Optional[float]
- backend_compile_time_s: Optional[float]
- inductor_compile_time_s: Optional[float]
- code_gen_time_s: Optional[float]
- fail_type: Optional[str]
- fail_reason: Optional[str]
- fail_user_frame_filename: Optional[str]
- fail_user_frame_lineno: Optional[int]
- non_compliant_ops: Set[str]
- compliant_custom_ops: Set[str]
- restart_reasons: Set[str]
- dynamo_time_before_restart_s: float
- # Sometimes, we will finish analyzing a frame but conclude we don't want
- # to install any guarded code. True means we actually decided to install
- # a compiled frame
- has_guarded_code: bool
- @dataclasses.dataclass
- class BwdCompilationMetrics:
- compile_id: str
- inductor_compile_time_s: Optional[float]
- code_gen_time_s: Optional[float]
- fail_type: Optional[str]
- fail_reason: Optional[str]
- DEFAULT_COMPILATION_METRICS_LIMIT = 64
- _compilation_metrics: Deque[
- Union[CompilationMetrics, BwdCompilationMetrics]
- ] = collections.deque(maxlen=DEFAULT_COMPILATION_METRICS_LIMIT)
- def record_compilation_metrics(
- compilation_metrics: Union[CompilationMetrics, BwdCompilationMetrics]
- ):
- global _compilation_metrics
- _compilation_metrics.append(compilation_metrics)
- if isinstance(compilation_metrics, CompilationMetrics):
- name = "compilation_metrics"
- else:
- name = "bwd_compilation_metrics"
- # Currently we only record forward compilation metrics; backward compilation metrics
- # will be added after the internal Scuba logging changes finish.
- if isinstance(compilation_metrics, CompilationMetrics):
- torch._logging.trace_structured(
- name,
- lambda: {
- k: list(v) if isinstance(v, set) else v
- for k, v in dataclasses.asdict(compilation_metrics).items()
- },
- )
- if config.log_compilation_metrics:
- log_compilation_event(compilation_metrics)
- def set_compilation_metrics_limit(new_size: int) -> None:
- global _compilation_metrics
- while len(_compilation_metrics) > new_size:
- _compilation_metrics.popleft()
- new_deque = collections.deque(_compilation_metrics, maxlen=new_size)
- _compilation_metrics = new_deque
- def clear_compilation_metrics() -> None:
- global _compilation_metrics
- _compilation_metrics.clear()
- def get_compilation_metrics() -> List[Union[CompilationMetrics, BwdCompilationMetrics]]:
- return list(_compilation_metrics)
- @dataclasses.dataclass
- class CleanupHook:
- """Remove a global variable when hook is called"""
- scope: Dict[str, Any]
- name: str
- def __call__(self, *args):
- # Make sure we're not shutting down
- if CleanupManager is not None:
- CleanupManager.count -= 1
- del self.scope[self.name]
- @staticmethod
- def create(scope, name, val):
- assert name not in scope
- CleanupManager.count += 1
- scope[name] = val
- return CleanupHook(scope, name)
- class CleanupManager(ExactWeakKeyDictionary):
- count = 0
- instance: ClassVar["CleanupManager"]
- def _remove_id(self, idx):
- for hook in self.values[idx]:
- hook()
- super()._remove_id(idx)
- CleanupManager.instance = CleanupManager()
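- # Usage sketch (assumed): CleanupHook.create installs a value into a scope (e.g. a
- # frame's globals) and returns a hook that removes it again:
- #   scope = {}
- #   hook = CleanupHook.create(scope, "tmp_name", object())
- #   hook()          # deletes scope["tmp_name"] and decrements CleanupManager.count
- # (`scope` and "tmp_name" are placeholders.)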
- def clone_tensor(x):
- """Clone the tensor and its gradient"""
- y = x.clone().requires_grad_(x.requires_grad)
- if x.is_leaf and x.grad is not None:
- y.grad = x.grad.clone()
- return y
- def clone_input(x, *, dtype=None):
- """copy while preserving strides"""
- # TODO: this is questionable
- if is_fake(x):
- # this func fails on fake tensors in __torch_dispatch__
- return x
- def torch_clone(x):
- y = torch.clone(x)
- if x.is_leaf:
- y.requires_grad_(x.requires_grad)
- if x.is_leaf and x.grad is not None:
- y.grad = clone_input(x.grad, dtype=dtype)
- if hasattr(x, "_dynamo_dynamic_indices"):
- y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined]
- return y
- with torch.no_grad():
- if x.device.type == "xla":
- # Accessing data_ptr() on an XLA tensor will cause a crash
- return torch_clone(x)
- # Handle sparse storage (no stride).
- if x.layout is torch.sparse_coo:
- return torch.sparse_coo_tensor(
- torch_clone(x._indices()),
- torch_clone(x._values()),
- x.shape,
- is_coalesced=x.is_coalesced(),
- )
- elif is_sparse_compressed(x):
- if x.layout in {torch.sparse_csr, torch.sparse_bsr}:
- compressed_indices = x.crow_indices()
- plain_indices = x.col_indices()
- else:
- compressed_indices = x.ccol_indices()
- plain_indices = x.row_indices()
- return torch.sparse_compressed_tensor(
- torch_clone(compressed_indices),
- torch_clone(plain_indices),
- torch_clone(x.values()),
- x.shape,
- layout=x.layout,
- )
- needed_size = sum(
- (shape - 1) * stride for shape, stride in zip(x.size(), x.stride())
- )
- if x.is_quantized:
- result = torch.empty_quantized((needed_size + 32,), x)
- else:
- result = torch.empty(
- needed_size + 32, dtype=dtype or x.dtype, device=x.device
- )
- cache_line_offset = (
- (x.data_ptr() - result.data_ptr()) % 32
- ) // x.element_size()
- result.as_strided_(x.size(), x.stride(), cache_line_offset)
- try:
- result.copy_(x.clone())
- if x.is_leaf:
- result.requires_grad_(x.requires_grad)
- if x.is_leaf and x.grad is not None:
- result.grad = clone_input(x.grad, dtype=dtype)
- except RuntimeError:
- # RuntimeError: unsupported operation: more than one element of the written-to
- # tensor refers to a single memory location. Please clone() the tensor before
- # performing the operation.
- return torch_clone(x)
- if hasattr(x, "_dynamo_dynamic_indices"):
- result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined]
- return result
- def clone_inputs(example_inputs):
- res: Union[Dict[Any, Any], List[Any]]
- if type(example_inputs) is dict:
- res = dict(example_inputs)
- for key, value in res.items():
- if isinstance(value, tuple):
- res[key] = clone_inputs(value)
- else:
- assert isinstance(value, torch.Tensor), type(value)
- res[key] = clone_input(value)
- return res
- res = list(example_inputs)
- for i in range(len(res)):
- if isinstance(res[i], torch.Tensor):
- res[i] = clone_input(res[i])
- return res
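- # Example sketch (assumed): clone_inputs preserves container structure, strides and
- # leaf requires_grad, e.g.
- #   inps = [torch.randn(4, 4).t(), torch.ones(2, requires_grad=True)]
- #   cloned = clone_inputs(inps)
- #   assert cloned[0].stride() == inps[0].stride()
- #   assert cloned[1].requires_grad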
- def skip_frame_if_in_functorch_mode(val: torch.Tensor):
- try:
- val.data_ptr() # will throw for functorch tensors
- except RuntimeError as e:
- from .exc import SkipFrame
- # This will be GradTrackingTensor/BatchedTensor/etc
- functorch_subclass_name = re.sub(r"\(.*", "", repr(val))
- raise SkipFrame(
- f"torch.compile cannot be run in context: {functorch_subclass_name}"
- ) from e
- @contextmanager
- def preserve_rng_state():
- disable_functorch = torch._C._DisableFuncTorch
- disable_current_modes = torch.utils._python_dispatch._disable_current_modes
- with disable_current_modes(), disable_functorch():
- rng_state = torch.clone(torch.random.get_rng_state())
- skip_frame_if_in_functorch_mode(rng_state)
- if torch.cuda.is_available():
- cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
- try:
- yield
- finally:
- with torch.utils._python_dispatch._disable_current_modes():
- torch.random.set_rng_state(rng_state)
- if torch.cuda.is_available():
- torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]
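- # Usage sketch (assumed): random draws made inside the context do not perturb the
- # RNG stream observed afterwards:
- #   torch.manual_seed(0)
- #   with preserve_rng_state():
- #       _ = torch.randn(3)      # consumes RNG state only inside the block
- #   a = torch.randn(3)          # same values as if the block had never run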
- def is_jit_model(model0):
- return isinstance(
- model0,
- (
- torch.jit._trace.TopLevelTracedModule,
- torch.jit._script.RecursiveScriptModule,
- torch.jit.ScriptFunction,
- torch.jit.ScriptModule,
- ),
- )
- def torchscript(model, example_inputs, verbose=False):
- if is_jit_model(model):
- # already done?
- return model
- try:
- return torch.jit.trace(model, example_inputs)
- except Exception:
- try:
- return torch.jit.script(model)
- except Exception:
- if verbose:
- log.exception("jit error")
- else:
- log.error("Both torch.jit.trace and torch.jit.script failed")
- return None
- def getfile(obj):
- try:
- return inspect.getfile(obj)
- except (TypeError, OSError):
- return None
- def is_namedtuple(obj):
- """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple"""
- return is_namedtuple_cls(type(obj))
- def is_namedtuple_cls(cls):
- """Test if an object is a namedtuple or a (torch.return_types|torch.autograd.forward_ad).* quasi-namedtuple"""
- try:
- if issubclass(cls, tuple):
- bases = getattr(cls, "__bases__", []) or [None]
- module = getattr(cls, "__module__", None)
- return module in ("torch.return_types", "torch.autograd.forward_ad") or (
- bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields")
- )
- except TypeError:
- pass
- return False
- @functools.lru_cache(1)
- def namedtuple_fields(cls):
- """Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple"""
- if cls is slice:
- return ["start", "stop", "step"]
- assert issubclass(cls, tuple)
- if hasattr(cls, "_fields"):
- # normal namedtuples
- return cls._fields
- @dataclasses.dataclass
- class Marker:
- index: int
- # frustrating ones e.g. torch.return_types.max
- assert cls.__module__ == "torch.return_types"
- obj = cls(map(Marker, range(cls.n_fields)))
- fields: List[Optional[str]] = [None] * cls.n_fields
- for name in dir(obj):
- if name[0] != "_" and isinstance(getattr(obj, name), Marker):
- fields[getattr(obj, name).index] = name
- return fields
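- # Example (illustrative): namedtuple_fields(slice) == ["start", "stop", "step"], and
- # for quasi-namedtuples such as torch.return_types.max the field names
- # ("values", "indices") are recovered, via _fields when present or the Marker
- # probing path otherwise.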
- def checkpoint_params(gm):
- with torch.no_grad():
- rng_state = torch.clone(torch.random.get_rng_state())
- if torch.cuda.is_available():
- cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
- saved_state = []
- for param in itertools.chain(gm.parameters(), gm.buffers()):
- saved_state.append((param, param._version, torch.clone(param)))
- def restore():
- with torch.no_grad():
- torch.random.set_rng_state(rng_state)
- if torch.cuda.is_available():
- torch.cuda.set_rng_state(cuda_rng_state)
- for param, version, original_value in saved_state:
- if param._version != version:
- param.copy_(original_value)
- return restore
- def timed(model, example_inputs, times=1):
- if torch.cuda.is_available():
- synchronize = torch.cuda.synchronize
- else:
- synchronize = nothing
- synchronize()
- gc.collect()
- torch.manual_seed(1337)
- t0 = time.perf_counter()
- for _ in range(times):
- result = model(*example_inputs)
- synchronize()
- t1 = time.perf_counter()
- return result, t1 - t0 # type: ignore[possibly-undefined]
- def check_is_cuda(gm, example_inputs):
- return all(x.is_cuda for x in itertools.chain(example_inputs, gm.parameters(True)))
- @lru_cache(32)
- def rot_n_helper(n):
- assert n > 1
- vars = [f"v{i}" for i in range(n)]
- rotated = reversed(vars[-1:] + vars[:-1])
- fn = eval(f"lambda {','.join(vars)}: ({','.join(rotated)})")
- fn.__name__ = f"rot_{n}_helper"
- return fn
- common_constant_types = {
- int,
- float,
- complex,
- bool,
- str,
- bytes,
- type(None),
- Ellipsis.__class__,
- types.CodeType,
- torch.device,
- torch.dtype,
- torch.memory_format,
- torch.layout,
- }
- if has_triton_package():
- import triton
- common_constant_types.add(triton.language.dtype)
- def is_safe_constant(v):
- if istype(v, (tuple, frozenset)):
- return all(map(is_safe_constant, v))
- return isinstance(v, (enum.Enum, type)) or istype(
- v,
- common_constant_types | {slice},
- )
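- # Example (illustrative): is_safe_constant((1, "a", None, torch.float32)) is True,
- # since every element's exact type is in common_constant_types, while
- # is_safe_constant([1, 2]) is False because list is not an allowed constant type.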
- def specialize_symnode(arg):
- from .variables import ConstantVariable, SymNodeVariable
- # Guard and specialize
- if isinstance(arg, SymNodeVariable):
- return ConstantVariable.create(arg.evaluate_expr())
- return arg
- def guard_if_dyn(arg):
- from .variables import ConstantVariable
- arg = specialize_symnode(arg)
- if isinstance(arg, ConstantVariable):
- return arg.as_python_constant()
- return arg
- def check_constant_args(args, kwargs):
- return all(x.is_python_constant() for x in itertools.chain(args, kwargs.values()))
- def check_unspec_python_args(args, kwargs):
- from .variables.constant import ConstantVariable
- from .variables.tensor import UnspecializedPythonVariable
- unspec_count = 0
- for x in itertools.chain(args, kwargs.values()):
- if isinstance(x, UnspecializedPythonVariable):
- unspec_count += 1
- elif not isinstance(x, ConstantVariable):
- return False
- return unspec_count > 0
- def check_unspec_or_constant_args(args, kwargs):
- # A fused version of:
- # return check_constant_args(args, kwargs) or check_unspec_python_args(args, kwargs)
- from .variables.tensor import UnspecializedPythonVariable
- for x in itertools.chain(args, kwargs.values()):
- if not (x.is_python_constant() or isinstance(x, UnspecializedPythonVariable)):
- return False
- return True
- def check_numpy_ndarray_args(args, kwargs):
- from .variables.tensor import NumpyNdarrayVariable
- return any(
- isinstance(x, NumpyNdarrayVariable)
- for x in itertools.chain(args, kwargs.values())
- )
- dict_keys: Type[KeysView[Any]] = type(dict().keys())
- dict_values: Type[ValuesView[Any]] = type(dict().values())
- odict_values: Type[ValuesView[Any]] = type(collections.OrderedDict().values())
- tuple_iterator: Type[Iterator[Any]] = type(iter(tuple()))
- tuple_iterator_len = tuple_iterator.__length_hint__ # type: ignore[attr-defined]
- object_new = object.__new__
- def nn_module_new(cls):
- obj = object_new(cls)
- torch.nn.Module.__init__(obj)
- return obj
- def product(it):
- return functools.reduce(operator.mul, it, 1)
- def tuple_iterator_getitem(it, index):
- _, (obj,), start = it.__reduce__()
- return obj[start + index]
- iter_next = next
- def to_subclass(t, cls):
- return t.as_subclass(cls)
- def dict_keys_getitem(d, n):
- return next(itertools.islice(iter(d), n, n + 1))
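- # Examples (illustrative):
- #   it = iter((10, 20, 30)); next(it)
- #   tuple_iterator_getitem(it, 0) == 20        # indexes relative to the iterator's position
- #   dict_keys_getitem({"a": 1, "b": 2}, 1) == "b"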
- def enum_repr(value, local):
- # An enum class can override the __str__ method, so use __class__ and the name
- # attribute to extract the class name and member name.
- name = value.__class__.__name__
- val = value.name
- scope = "L" if local else "G"
- local_name = f'{scope}["{name}"].{val}'
- return local_name
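- # Example (illustrative): for a hypothetical `class Color(enum.Enum): RED = 1`
- # stored in a local, enum_repr(Color.RED, local=True) == 'L["Color"].RED', which is
- # the form guard source code expects.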
- def set_example_value(node, example_value):
- # NB: example_value is a bit of a misnomer, because this is always a fake
- # tensor of some sort. Furthermore, these example values serve as the
- # runtime state of Dynamo tracing, which means if metadata mutation
- # occurs, the example_value gets directly updated (so you can't rely on
- # this to accurately reflect what the state of the value was at the time
- # the program was traced).
- node.meta["example_value"] = example_value
- shape_env = TracingContext.get().fake_mode.shape_env
- if symbol_to_path := torch.fx.experimental.symbolic_shapes.compute_unbacked_bindings(
- shape_env, example_value
- ):
- node.meta["unbacked_bindings"] = symbol_to_path
- def _get_fake_tensor(vt):
- fake_tensor = vt.as_proxy().node.meta.get("example_value")
- if not is_fake(fake_tensor):
- from .exc import unimplemented
- unimplemented("Cannot check Tensor object identity without its fake value")
- return fake_tensor
- def iter_contains(items, search, tx, check_tensor_identity=False):
- from .variables import (
- BuiltinVariable,
- ConstantVariable,
- TensorVariable,
- VariableTracker,
- )
- if search.is_python_constant():
- found_const = any(
- x.is_python_constant()
- and x.as_python_constant() == search.as_python_constant()
- for x in items
- )
- return ConstantVariable.create(found_const)
- must_check_tensor_id = False
- if check_tensor_identity and isinstance(search, TensorVariable):
- must_check_tensor_id = True
- # Match of Tensor means match of FakeTensor
- search = _get_fake_tensor(search)
- found: Optional[VariableTracker] = None
- for x in items:
- if must_check_tensor_id:
- if isinstance(x, TensorVariable):
- if search is _get_fake_tensor(x): # Object equivalence
- return ConstantVariable.create(True)
- else:
- check = BuiltinVariable(operator.eq).call_function(tx, [x, search], {})
- if found is None:
- found = check
- else:
- found = BuiltinVariable(operator.or_).call_function(
- tx, [check, found], {}
- )
- if found is None:
- found = ConstantVariable.create(False)
- return found
- def key_is_id(k):
- """Returns whether it indexes dictionaries using its id"""
- return isinstance(k, (torch.Tensor, torch.nn.Module, MethodWrapperType))
- def key_to_id(value):
- return [id(k) if key_is_id(k) else k for k in value.keys()]
- def const_repr(x, *, local) -> str:
- from .trace_rules import is_builtin_callable
- if isinstance(x, (list, tuple)):
- elems_repr = ",".join(const_repr(s, local=local) for s in x)
- if isinstance(x, list):
- return f"[{elems_repr}]"
- else:
- assert isinstance(x, tuple)
- if len(x) == 1:
- return f"({elems_repr},)"
- else:
- return f"({elems_repr})"
- elif isinstance(x, enum.Enum):
- # Work around repr(Enum) returning an invalid global reference before Python 3.11
- # by calling enum_repr and removing quotes to render the enum in guard code.
- return enum_repr(x, local=local).replace("'", "")
- elif is_builtin_callable(x):
- return x.__name__
- elif isinstance(x, type):
- def fullname(o):
- klass = o.__class__
- module = klass.__module__
- if module == "builtins":
- return klass.__qualname__ # avoid outputs like 'builtins.str'
- return module + "." + klass.__qualname__
- return fullname(x)
- else:
- return f"{x!r}"
- def dict_keys_repr(const_keys, *, local) -> str:
- keys_str = ",".join(const_repr(s, local=local) for s in const_keys)
- return "[" + keys_str + "]"
- GLOBAL_KEY_PREFIX = "__dict_key"
- from torch._subclasses import UnsupportedFakeTensorException # noqa: F401
- def wrap_fake_exception(fn):
- try:
- return fn()
- except UnsupportedFakeTensorException as e:
- from .exc import unimplemented
- msg = f"Unsupported: {e.reason} with fake tensor propagation."
- log.warning(msg)
- unimplemented(msg, from_exc=e)
- def deepcopy_to_fake_tensor(obj, fake_mode):
- with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode):
- return wrap_fake_exception(lambda: copy.deepcopy(obj))
- def rmse(ref, res):
- """
- Calculate root mean squared error
- """
- return torch.sqrt(torch.mean(torch.square(ref - res)))
- def same(
- ref,
- res,
- fp64_ref=None,
- cos_similarity=False,
- tol=1e-4,
- equal_nan=False,
- exact_dtype=True,
- relax_numpy_equality=False,
- ignore_non_fp=False,
- log_error=log.error,
- ):
- """Check correctness to see if ref and res match"""
- if fp64_ref is None:
- fp64_ref = ref
- if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)):
- assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}"
- if len(ref) != len(res):
- log_error("Length mismatch")
- return False
- return len(ref) == len(res) and all(
- same(
- ai,
- bi,
- fp64_refi,
- cos_similarity,
- tol,
- equal_nan,
- exact_dtype,
- relax_numpy_equality,
- ignore_non_fp,
- log_error=log_error,
- )
- for ai, bi, fp64_refi in zip(ref, res, fp64_ref)
- )
- elif type(ref).__name__ == "QuestionAnsweringModelOutput":
- # This skips checking accuracy for start_logits/end_logits.
- # Tentatively, start_logits/end_logits appear to be very prone to
- # inaccuracies and are somewhat subsumed by checking the loss.
- return same(
- ref.loss,
- res.loss,
- fp64_ref.loss,
- cos_similarity,
- tol,
- equal_nan,
- exact_dtype,
- relax_numpy_equality,
- ignore_non_fp,
- log_error=log_error,
- )
- elif isinstance(ref, dict):
- assert isinstance(res, dict)
- assert set(ref.keys()) == set(
- res.keys()
- ), f"keys mismatch {set(ref.keys())} == {set(res.keys())}"
- for k in sorted(ref.keys()):
- if not (
- same(
- ref[k],
- res[k],
- fp64_ref[k],
- cos_similarity=cos_similarity,
- tol=tol,
- equal_nan=equal_nan,
- exact_dtype=exact_dtype,
- relax_numpy_equality=relax_numpy_equality,
- ignore_non_fp=ignore_non_fp,
- log_error=log_error,
- )
- ):
- log_error("Accuracy failed for key name %s", k)
- return False
- return True
- elif isinstance(ref, (torch.Tensor, float)):
- assert not isinstance(ref, torch._subclasses.FakeTensor)
- assert not isinstance(res, torch._subclasses.FakeTensor)
- def to_tensor(t):
- return t if isinstance(t, torch.Tensor) else torch.tensor(t)
- ref, res, fp64_ref = (to_tensor(val) for val in (ref, res, fp64_ref))
- if ref.is_sparse:
- assert res.is_sparse
- ref = ref.to_dense()
- res = res.to_dense()
- assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}"
- if exact_dtype:
- if ref.dtype != res.dtype:
- log_error("dtype mismatch %s, %s", ref.dtype, res.dtype)
- return False
- if ref.dtype == torch.bool:
- if ignore_non_fp:
- return True
- # triton stores bool as int8, so add this for more accurate checking
- r = torch.allclose(
- ref.to(dtype=torch.uint8),
- res.to(dtype=torch.uint8),
- atol=tol,
- rtol=tol,
- equal_nan=equal_nan,
- )
- if not r:
- log_error("Accuracy failed: uint8 tensor did not match")
- return r
- if cos_similarity:
- ref = ref.flatten().to(torch.float32)
- res = res.flatten().to(torch.float32)
- if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True):
- # early exit that handles zero/nan better
- # cosine_similarity(zeros(10), zeros(10), dim=0) is 0
- return True
- score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6)
- if score < 0.99:
- log.warning("Similarity score=%s", score.cpu().detach().item())
- return score >= 0.99
- else:
- if not exact_dtype:
- ref = ref.to(res.dtype)
- # First try usual allclose
- if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan):
- return True
- # Check error from fp64 version
- if fp64_ref.dtype == torch.float64:
- ref_error = rmse(fp64_ref, ref).item()
- # ref is unable to produce this value with stable numerics in this precision; ignore
- if math.isnan(ref_error):
- log.warning(
- "Found nan in reference. Consider running in higher precision."
- )
- res_error = rmse(fp64_ref, res).item()
- # In the case of using AMP (Automatic Mixed Precision), certain models have
- # failed the benchmark's correctness check. However, the end-to-end model's
- # accuracy when comparing AMP with FP32 is within a difference of less than 0.1%.
- # Thus, it's possible that the correctness check failures for these models are
- # false alarms. We use a multiplier of 3 instead of 2 to avoid these false alarms.
- multiplier = 3.0 if res.dtype == torch.bfloat16 else 2.0
- if (
- fp64_ref.numel() < 1000
- or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1)
- # large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE
- or tol >= 2 * 1e-2
- ):
- # In the presence of noise, noise might dominate our error
- # metric for smaller tensors.
- # Similarly, for 1x1 kernels, there seems to be high noise with amp.
- multiplier = 3.0
- passes_test = res_error <= (multiplier * ref_error + tol / 10.0)
- if not passes_test:
- log_error(
- "RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s. res.dtype: %s, multiplier: %f, tol: %f",
- res_error,
- ref_error,
- res.size(),
- res.dtype,
- multiplier,
- tol,
- )
- # import pdb; pdb.set_trace()
- return passes_test
- if ignore_non_fp:
- return True
- log_error("Accuracy failed: allclose not within tol=%s", tol)
- return False
- elif isinstance(ref, (str, int, type(None), bool, torch.device)):
- if ignore_non_fp:
- return True
- r = ref == res
- if not r:
- log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res)
- return r
- elif is_numpy_int_type(ref) or is_numpy_float_type(ref):
- if relax_numpy_equality and not (
- is_numpy_int_type(res) or is_numpy_float_type(res)
- ):
- ref = ref.item()
- r = (type(ref) is type(res)) and (ref == res)
- if not r:
- log_error("Accuracy failed (numpy): %s != %s", ref, res)
- return r
- elif is_numpy_ndarray(ref):
- return (type(ref) is type(res)) and same(
- torch.as_tensor(ref),
- torch.as_tensor(res),
- fp64_ref,
- cos_similarity=cos_similarity,
- tol=tol,
- equal_nan=equal_nan,
- exact_dtype=exact_dtype,
- relax_numpy_equality=relax_numpy_equality,
- ignore_non_fp=ignore_non_fp,
- log_error=log_error,
- )
- elif type(ref).__name__ in (
- "MaskedLMOutput",
- "Seq2SeqLMOutput",
- "CausalLMOutputWithCrossAttentions",
- "LongformerMaskedLMOutput",
- "Instances",
- "SquashedNormal",
- "Boxes",
- "Normal",
- "TanhTransform",
- "Foo",
- "Variable",
- ):
- assert type(ref) is type(res)
- return all(
- same(
- getattr(ref, key),
- getattr(res, key),
- getattr(fp64_ref, key),
- cos_similarity=cos_similarity,
- tol=tol,
- equal_nan=equal_nan,
- exact_dtype=exact_dtype,
- relax_numpy_equality=relax_numpy_equality,
- ignore_non_fp=ignore_non_fp,
- log_error=log_error,
- )
- for key in ref.__dict__.keys()
- )
- else:
- raise RuntimeError(f"unsupported type: {type(ref).__name__}")
- def format_func_info(code):
- short_filename = code.co_filename.split("/")[-1]
- return f"'{code.co_name}' ({short_filename}:{code.co_firstlineno})"
- @contextlib.contextmanager
- def disable_cache_limit():
- prior = config.cache_size_limit
- config.cache_size_limit = sys.maxsize
- prior_acc_limit = config.accumulated_cache_size_limit
- config.accumulated_cache_size_limit = sys.maxsize
- try:
- yield
- finally:
- config.cache_size_limit = prior
- config.accumulated_cache_size_limit = prior_acc_limit
- # map from transformed code back to original user code
- orig_code_map = ExactWeakKeyDictionary()
- # keep a record of code_obj -> list of guard failure reasons for logging
- guard_failures: DefaultDict[Any, List[Any]] = collections.defaultdict(list)
- # Keep a record of graph break reasons for logging
- graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = list()
- # keep record of compiled code, if we are in "error if recompile"
- # to track code that dynamo has compiled previously
- seen_code_map = ExactWeakKeyDictionary()
- class CompileProfiler:
- """Utility for profiling how and what dynamo would compile.
- Can be used for
- * diagnosing recompilation issues
- * determining an appropriate compile cache limit
- * (TODO) confirming which functions got compiled/skipped
- """
- def __init__(self):
- self.frame_count = 0
- self.op_count = 0
- self.backend_ctx_ctor = disable_cache_limit
- def __call__(self, gm: torch.fx.GraphModule, example_inputs):
- self.frame_count += 1
- for node in gm.graph.nodes:
- if "call" in node.op:
- self.op_count += 1
- return gm.forward
- # no-op __enter__ and __exit__ to preserve BC
- def __enter__(self):
- return self
- def __exit__(self, typ, val, traceback):
- pass
- def get_metrics(self):
- return {"guard_failures": guard_failures}
- def report(self):
- metrics = self.get_metrics()
- gf = metrics["guard_failures"]
- def num_recompiles(code):
- return len(gf[code])
- def recompile_reasons(code):
- return "\n".join([str(x) for x in gf[code]])
- summarized_gf = [
- [format_func_info(code), num_recompiles(code), recompile_reasons(code)]
- for code in gf
- ]
- def graph_break_report():
- if "graph_break" in counters:
- graph_breaks = counters["graph_break"]
- return tabulate(
- [[msg, graph_breaks[msg]] for msg in graph_breaks],
- headers=["Graph Break Reason", "Count"],
- )
- def recompilation_report():
- if len(gf):
- max_recompiles = max(num_recompiles(code) for code in gf)
- recomp_table = tabulate(
- summarized_gf,
- headers=["Function", "Recompiles", "Recompile Reasons"],
- )
- return recomp_table + textwrap.dedent(
- f"""
- Set torch._dynamo.config.cache_size_limit to {max_recompiles} to avoid being cache limited.
- """
- )
- report = textwrap.dedent(
- """
- Torchdynamo Profiler Report
- ===========================
- Graph Breaks
- ------------
- Graph breaks happen when torchdynamo encounters code it can't safely trace.
- If you want to find out why breaks are happening, check below for each break reason.
- You may gain additional insight by passing `fullgraph=True` to torch.compile
- to stop at the first break.
- """
- )
- report += graph_break_report() or "No graph breaks detected."
- report += textwrap.dedent(
- """
- Recompilation
- -------------
- These subgraphs were recompiled more than once due to guard failures.
- Guard failures indicate some condition assumed to be static by the tracer changed,
- making it unsafe to reuse the compiled program.
- """
- )
- report += recompilation_report() or "No recompilation detected.\n"
- return report
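- # Usage sketch (assumed): CompileProfiler matches the dynamo backend signature, so
- # it can be passed as a backend and queried afterwards:
- #   prof = CompileProfiler()
- #   opt_fn = torch.compile(fn, backend=prof)
- #   opt_fn(example_input)
- #   print(prof.report())
- # where `fn` and `example_input` are placeholders for user code.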
- # return same dir unless user changes config between calls
- @functools.lru_cache(None)
- def _get_debug_dir(root_dir):
- dir_name = (
- "run_"
- + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
- # use pid to avoid conflicts among ranks
- + "-pid_"
- + str(os.getpid())
- )
- return os.path.join(root_dir, dir_name)
- def get_debug_dir():
- debug_root = config.debug_dir_root
- return _get_debug_dir(debug_root)
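- # Example (illustrative): with config.debug_dir_root set to "./torch_compile_debug",
- # get_debug_dir() returns a per-process path such as
- #   ./torch_compile_debug/run_2024_01_01_12_00_00_000000-pid_1234
- # and, because of the lru_cache above, the same path is reused for the whole run
- # unless the configured root changes.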
- def extract_fake_example_value(node, required=True):
- if "example_value" in node.meta and is_fake(node.meta["example_value"]):
- return node.meta["example_value"]
- elif required:
- from torch._dynamo.exc import unimplemented
- unimplemented("`FakeTensor` example value was required but not available")
- else:
- return None
- def ensure_graph_fake(e, tx):
- assert maybe_get_fake_mode(e) is tx.fake_mode
- return e
- def get_fake_values_from_nodes(tx, nodes, allow_non_graph_fake):
- def visit(n: torch.fx.Node):
- if n.op == "call_function" and "example_value" not in n.meta:
- # fake tensor validity is checked inside get_fake_value using
- # ensure_graph_fake
- return get_fake_value(n, tx, allow_non_graph_fake)
- out = n.meta["example_value"]
- if not allow_non_graph_fake and isinstance(out, torch.Tensor):
- return ensure_graph_fake(out, tx)
- return out
- return torch.fx.node.map_arg(nodes, visit)
- def get_fake_value(node, tx, allow_non_graph_fake=False):
- """
- Run the computation represented by `node` using fake tensors and return the result.
- allow_non_graph_fake: whether to allow the return result to be:
- 1. non-fake or 2. fake that is not created by this instance of Dynamo.
- If `True`, you must be prepared to deal with such return values, ideally
- by further wrapping them as this graph's fakes.
- """
- from torch.utils._sympy.value_ranges import ValueRangeError
- from .exc import (
- TorchRuntimeError,
- unimplemented,
- Unsupported,
- UserError,
- UserErrorType,
- )
- op = node.op
- # FX Node should always return the same fake value
- if "example_value" in node.meta and is_fake(node.meta["example_value"]):
- return node.meta["example_value"]
- args, kwargs = get_fake_values_from_nodes(
- tx, (node.args, node.kwargs), allow_non_graph_fake
- )
- nnmodule = None
- if op == "call_method" and len(args) > 0 and isinstance(args[0], torch.nn.Module):
- # If the first argument is an nn.Module, it should be copied to fake mode.
- args = (deepcopy_to_fake_tensor(args[0], tx.fake_mode),) + tuple(args[1:])
- if op == "call_module":
- nnmodule = tx.output.nn_modules[node.target]
- if is_lazy_module(nnmodule) and hasattr(nnmodule, "_initialize_hook"):
- # In the case of a lazy module, we want to run
- # the pre-hooks which initialize it.
- # Afterwards, the lazy module deletes its pre-hooks
- # so it is not treated as lazy on subsequent recompiles.
- nnmodule._infer_parameters(nnmodule, args)
- # Whether or not it is a lazy module, we should copy it to fake mode.
- nnmodule = deepcopy_to_fake_tensor(nnmodule, tx.fake_mode)
- try:
- with tx.fake_mode, enable_python_dispatcher():
- ret_val = wrap_fake_exception(
- lambda: run_node(tx.output, node, args, kwargs, nnmodule)
- )
- except Unsupported:
- raise
- except RuntimeError as e:
- cause: BaseException = e
- if e.__cause__ is not None:
- cause = e.__cause__
- if isinstance(
- cause, torch._subclasses.fake_tensor.DataDependentOutputException
- ):
- unimplemented(
- f"data dependent operator: {cause.func}; "
- "to enable, set torch._dynamo.config.capture_scalar_outputs = True"
- )
- elif isinstance(
- cause, torch._subclasses.fake_tensor.DynamicOutputShapeException
- ):
- if not torch._dynamo.config.capture_dynamic_output_shape_ops:
- unimplemented(
- f"dynamic shape operator: {cause.func}; "
- "to enable, set torch._dynamo.config.capture_dynamic_output_shape_ops = True"
- )
- else:
- unimplemented(
- f"dynamic shape operator: {cause.func}; "
- "Operator does not have a meta kernel that supports dynamic output shapes, "
- "please report an issue to PyTorch"
- )
- elif isinstance(
- cause, torch._subclasses.fake_tensor.UnsupportedOperatorException
- ):
- op = cause.func
- import_suggestion = ""
- if isinstance(op, torch._ops.OpOverload):
- maybe_pystub = torch._C._dispatch_pystub(
- op._schema.name, op._schema.overload_name
- )
- if maybe_pystub is not None:
- module, ctx = maybe_pystub
- import_suggestion = (
- f"It's possible that the support was implemented in "
- f"module `{module}` and you may need to `import {module}`"
- f"({ctx}), otherwise "
- )
- unimplemented(
- f"unsupported operator: {cause.func} ({import_suggestion}see "
- "https://docs.google.com/document/d/1GgvOe7C8_NVOMLOCwDaYV1mXXyHMXY7ExoewHqooxrs/edit#heading=h.64r4npvq0w0"
- " for how to fix)"
- )
- elif isinstance(
- cause, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode
- ):
- raise UserError( # noqa: B904
- UserErrorType.CONSTRAINT_VIOLATION,
- "Tried to use data-dependent value in the subsequent computation. "
- "This can happen when we encounter unbounded dynamic value that is unknown during tracing time. "
- "You will need to explicitly give hint to the compiler. Please take a look at "
- f"torch._check OR torch._check_is_size APIs. {cause}",
- case_name="constrain_as_size_example",
- )
- elif isinstance(cause, ValueRangeError):
- raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e
- elif isinstance(cause, TypeError) and "argument" in str(cause):
- unimplemented(f"TypeError {node.target}: {cause}")
- raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
- if not allow_non_graph_fake:
- _ = pytree.tree_map_only(
- torch.Tensor, functools.partial(ensure_graph_fake, tx=tx), ret_val
- )
- return ret_val
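- # Illustrative sketch of how the above is used: for an fx node representing e.g.
- # torch.add(x, y), get_fake_value(node, tx) recursively resolves fake example
- # values for x and y, runs the op under tx.fake_mode, and returns a FakeTensor
- # describing the output (or calls unimplemented()/raises on unsupported cases).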
- _current_node = threading.local()
- def get_current_node():
- return getattr(_current_node, "value", None)
- @contextmanager
- def set_current_node(node):
- old = get_current_node()
- _current_node.value = node
- try:
- yield
- finally:
- _current_node.value = old
- def run_node(tracer, node, args, kwargs, nnmodule):
- """
- Runs a given node, with the given args and kwargs.
- Behavior is dictated by a node's op.
- run_node is useful for extracting real values out of nodes.
- See get_real_value for more info on common usage.
- Note: The tracer arg is only used for 'get_attr' ops
- Note: The nnmodule arg is only used for 'call_module' ops
- Nodes whose op is not call_function, call_method, call_module, get_attr, or
- placeholder will raise an AssertionError.
- """
- op = node.op
- with set_current_node(node):
- def make_error_message(e):
- return f"Failed running {op} {node.target}(*{args}, **{kwargs}):\n" + str(e)
- try:
- if op == "call_function":
- return node.target(*args, **kwargs)
- elif op == "call_method":
- return getattr(args[0], node.target)(*args[1:], **kwargs)
- elif op == "call_module":
- assert nnmodule is not None
- return nnmodule(*args, **kwargs)
- elif op == "get_attr":
- return tracer.output_graph.get_submodule(node.target)
- elif op == "placeholder":
- assert "example_value" in node.meta
- return node.meta["example_value"]
- except (NotImplementedError, UnsupportedFakeTensorException) as e:
- # NB: mimic how wrap_fake_exception does it
- from .exc import unimplemented
- unimplemented(make_error_message(e), from_exc=e)
- except Exception as e:
- raise RuntimeError(make_error_message(e)).with_traceback(
- e.__traceback__
- ) from e
- raise AssertionError(op)
- def get_real_value(node, tracer):
- """
- Run the actual computation represented by `node` and return the result.
- This will execute any dependent nodes in the graph as well.
- """
- from .exc import TorchRuntimeError
- cache = tracer.real_value_cache
- if node in cache:
- return cache[node]
- op = node.op
- args, kwargs = torch.fx.node.map_arg(
- (node.args, node.kwargs),
- lambda n: get_real_value(n, tracer),
- )
- if op == "placeholder" and "grapharg" in node.meta:
- return node.meta["grapharg"].example
- if op == "call_module":
- nn_module = tracer.output_graph.nn_modules[node.target]
- if not is_lazy_module(nn_module):
- nn_module = copy.deepcopy(nn_module)
- else:
- # In the case of a lazy module, we want to run
- # the pre-hooks which initialize it
- nn_module(*args, **kwargs)
- else:
- nn_module = None
- try:
- real_value = run_node(tracer, node, args, kwargs, nn_module)
- cache[node] = real_value
- except RuntimeError as e:
- raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
- return real_value
- def assert_no_fake_params_or_buffers(gm):
- from torch._subclasses.fake_tensor import FakeTensorConfig, is_fake
- def stack_or_hint(t):
- if FakeTensorConfig.debug:
- import traceback
- return f"FAKE TENSOR CREATION TRACEBACK: \n {traceback.format_list(t._debug_trace)}"
- else:
- return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors."
- for name, buffer in gm.named_buffers():
- assert not is_fake(
- buffer
- ), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}"
- for name, param in gm.named_parameters():
- assert not is_fake(
- param
- ), f"Unexpected fake param {name} {stack_or_hint(param)}"
- def fqn(obj: Any):
- """
- Returns the fully qualified name of the object.
- """
- return f"{obj.__module__}.{obj.__qualname__}"
- def ifdynstaticdefault(count1, count2):
- if torch._dynamo.config.assume_static_by_default:
- return count1
- else:
- return count2
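- # Note: this helper picks between two expected values depending on whether dynamo
- # assumes static shapes by default (e.g. expected op/guard counts in tests).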
- def import_submodule(mod: types.ModuleType):
- """
- Ensure all the files in a given submodule are imported
- """
- for filename in sorted(os.listdir(os.path.dirname(cast(str, mod.__file__)))):
- if filename.endswith(".py") and filename[0] != "_":
- importlib.import_module(f"{mod.__name__}.{filename[:-3]}")
- def object_has_getattribute(value: Any):
- try:
- if isinstance(
- inspect.getattr_static(type(value), "__getattribute__"),
- types.FunctionType,
- ):
- return True
- except AttributeError:
- pass
- return False
- def get_custom_getattr(value: Any):
- try:
- getattr_fn = inspect.getattr_static(type(value), "__getattr__")
- except AttributeError:
- getattr_fn = None
- if getattr_fn is torch.nn.Module.__getattr__:
- # ignore this case of getattr
- getattr_fn = None
- return getattr_fn
- class TensorStaticReason(enum.Enum):
- PARAMETER = 2
- NOT_TENSOR = 4
- NN_MODULE_PROPERTY = 5
- def tensor_static_reason_to_message(reason: TensorStaticReason):
- if reason == TensorStaticReason.PARAMETER:
- return "mark_dynamic on a parameter; parameters are always static today."
- if reason == TensorStaticReason.NOT_TENSOR:
- return "mark_dynamic on a non-tensor; how did this happen?"
- if reason == TensorStaticReason.NN_MODULE_PROPERTY:
- return "tensor is static because it is associated with an nn.Module."
- raise AssertionError(f"Illegal reason {reason}")
- def tensor_always_has_static_shape(
- tensor: Union[torch.Tensor, Any],
- is_tensor: bool,
- guard_source: "torch._guards.GuardSource",
- ) -> Tuple[bool, Optional[TensorStaticReason]]:
- """
- Given a tensor, its guard source, and an is_tensor flag, determine whether its shape should be static.
- Args:
- tensor - the real tensor to evaluate; parameters force a static shape.
- is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable;
- tensors that are not in a TensorVariable, for whatever reason, are forced static.
- Returns a tuple, where the first element is a bool indicating whether this tensor should have a static shape.
- The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed.
- """
- if guard_source.is_nn_module() and config.force_nn_module_property_static_shapes:
- return True, TensorStaticReason.NN_MODULE_PROPERTY
- if type(tensor) is torch.nn.Parameter and config.force_parameter_static_shapes:
- return True, TensorStaticReason.PARAMETER
- if not is_tensor:
- return True, TensorStaticReason.NOT_TENSOR
- return False, None
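- # Illustrative example, assuming the default config (force_parameter_static_shapes=True)
- # and a non-nn-module guard source:
- #   tensor_always_has_static_shape(some_param, is_tensor=True, guard_source=src)
- #   -> (True, TensorStaticReason.PARAMETER)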
- def lazy_format_graph_tabular(fn_name, gm):
- def inner():
- try:
- from tabulate import tabulate # TODO: Check that this is installed
- except ImportError:
- return (
- "Tabulate module missing, please install tabulate to log the graph in tabular format, logging code instead:\n"
- + str(lazy_format_graph_code(fn_name, gm))
- )
- node_specs = [
- [n.op, n.name, n.target, n.args, n.kwargs] for n in gm.graph.nodes
- ]
- graph_str = tabulate(
- node_specs, headers=["opcode", "name", "target", "args", "kwargs"]
- )
- return _format_graph_code(fn_name, gm.forward.__code__.co_filename, graph_str)
- return LazyString(inner)
- def format_bytecode(prefix, name, filename, line_no, code):
- return f"{prefix} {name} {filename} line {line_no} \n{dis.Bytecode(code).dis()}\n"
- forward_hook_names = ["_forward_pre_hooks", "_forward_hooks"]
- backward_hook_names = ["_backward_pre_hooks", "_backward_hooks"]
- state_dict_hook_names = [
- "_state_dict_pre_hooks",
- "_state_dict_hooks",
- "_load_state_dict_pre_hooks",
- "_load_state_dict_post_hooks",
- ]
- all_hook_names = forward_hook_names + backward_hook_names + state_dict_hook_names
- def nn_module_has_global_hooks():
- # This is limited to backward hooks for now because NNModuleVariable
- # already supports forward hooks under the hood.
- return len(torch.nn.modules.module._global_backward_hooks) or len(
- torch.nn.modules.module._global_backward_pre_hooks
- )
- def nn_module_get_all_hooks(
- mod,
- check_forward_hooks=False,
- check_backward_hooks=False,
- check_state_dict_hooks=False,
- ):
- """
- Sometimes it's useful to differentiate between types of hooks, such as forward/backward/pre
- hooks executed during module.__call__, and state_dict hooks which are executed separately.
- """
- hook_dicts_to_check = []
- check_all_hooks = (
- not check_forward_hooks
- and not check_backward_hooks
- and not check_state_dict_hooks
- )
- if check_forward_hooks or check_all_hooks:
- hook_dicts_to_check.extend(forward_hook_names)
- if check_backward_hooks or check_all_hooks:
- hook_dicts_to_check.extend(backward_hook_names)
- if check_state_dict_hooks:
- hook_dicts_to_check.extend(state_dict_hook_names)
- all_hooks = []
- for hook_dict_name in hook_dicts_to_check:
- hooks = getattr(mod, hook_dict_name, [])
- for hook_name in hooks:
- hook = hooks[hook_name]
- all_hooks.append(hook)
- return all_hooks
- def nnmodule_has_hooks(
- mod,
- check_forward_hooks=False,
- check_backward_hooks=False,
- check_state_dict_hooks=False,
- ):
- """
- Helper function to check if a module has any hooks attached to it.
- """
- hooks = nn_module_get_all_hooks(
- mod,
- check_forward_hooks=check_forward_hooks,
- check_backward_hooks=check_backward_hooks,
- check_state_dict_hooks=check_state_dict_hooks,
- )
- return bool(hooks)
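- # Usage sketch (illustrative):
- #   if nnmodule_has_hooks(mod, check_forward_hooks=True, check_backward_hooks=True):
- #       ...  # handle the hooked module specially (e.g. skip an optimization)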
- def to_numpy_helper(value):
- """Convert tensor and tnp.ndarray to numpy.ndarray."""
- if is_fake(value):
- return value
- if isinstance(value, tnp.ndarray):
- return to_numpy_helper(value.tensor)
- elif isinstance(value, torch.Tensor):
- return value.numpy(force=True)
- elif isinstance(value, (tuple, list)):
- return type(value)(to_numpy_helper(obj) for obj in value)
- else:
- return value
- def numpy_to_tensor(value):
- """Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert."""
- assert np is not None
- if isinstance(value, np.ndarray):
- return torch.as_tensor(value)
- if isinstance(value, tnp.ndarray):
- return value.tensor
- elif isinstance(value, (tuple, list)):
- return type(value)(numpy_to_tensor(obj) for obj in value)
- else:
- return value
- class numpy_to_tensor_wrapper:
- def __init__(self, f):
- self.f = f
- self.__name__ = "wrapped_" + self.f.__name__
- def __repr__(self):
- return f"<Wrapped function <original {self.f.__name__}>>"
- def __call__(self, *args, **kwargs):
- out = self.f(*args, **kwargs)
- return numpy_to_tensor(out)
- def numpy_attr_wrapper(obj, name):
- if isinstance(obj, tnp.ndarray):
- out = getattr(obj, name)
- return numpy_to_tensor(out)
- elif isinstance(obj, torch.Tensor):
- out = getattr(tnp.ndarray(obj), name)
- return numpy_to_tensor(out)
- class numpy_method_wrapper:
- """Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor."""
- def __init__(self, method: str):
- self.method = method
- self.__name__ = "wrapped_" + self.method
- def __repr__(self):
- return f"<Wrapped method <original {self.method}>>"
- def __call__(self, *args, **kwargs):
- obj = args[0]
- if isinstance(obj, torch.Tensor):
- obj = tnp.ndarray(obj)
- method_callable = getattr(obj, self.method)
- out = method_callable(*args[1:], **kwargs)
- return numpy_to_tensor(out)
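- # Usage sketch (illustrative): numpy_method_wrapper("reshape")(t, (2, 3)) calls
- # tnp.ndarray(t).reshape((2, 3)) and converts the result back to a torch.Tensor.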
- class numpy_operator_wrapper:
- """Implements dunder methods for tnp.ndarray via functions from the operator library"""
- def __init__(self, op: Callable[..., Any]):
- self.op = op
- self.__name__ = f"wrapped_{op.__name__}"
- def __repr__(self):
- return f"<Wrapped operator <original {self.__name__}>>"
- def __call__(self, *args, **kwargs):
- assert not kwargs
- args = (
- tnp.ndarray(arg) if isinstance(arg, torch.Tensor) else arg for arg in args
- )
- out = self.op(*args)
- return numpy_to_tensor(out)
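- # Usage sketch (illustrative): numpy_operator_wrapper(operator.add)(t1, t2) wraps
- # both tensors as tnp.ndarray, applies operator.add, and converts the result back
- # to a torch.Tensor (kwargs are intentionally not supported).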
- def defake(x):
- if not isinstance(x, FakeTensor):
- return x
- size: torch._prims_common.ShapeType
- stride: torch._prims_common.StrideType
- if x._has_symbolic_sizes_strides:
- size = []
- for s in x.size():
- if isinstance(s, torch.SymInt):
- size.append(s.node.shape_env.size_hint(s.node.expr))
- else:
- size.append(s)
- stride = []
- for s in x.stride():
- if isinstance(s, torch.SymInt):
- stride.append(s.node.shape_env.size_hint(s.node.expr))
- else:
- stride.append(s)
- else:
- size = x.size()
- stride = x.stride()
- y = torch.empty_strided(
- size,
- stride,
- dtype=x.dtype,
- device=x.device,
- requires_grad=x.requires_grad,
- )
- y.zero_()
- return y
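- # Note: defake() materializes a real, zero-filled tensor matching the fake tensor's
- # (hinted) sizes/strides, dtype, device, and requires_grad, e.g. for running real kernels.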
- def is_utils_checkpoint(obj):
- # Lazy import to avoid circular dependencies
- import torch.utils.checkpoint
- return obj is torch.utils.checkpoint.checkpoint
- def build_checkpoint_variable(**options):
- import torch._higher_order_ops.wrap as higher_order_ops
- from .variables.higher_order_ops import TorchHigherOrderOperatorVariable
- # TODO - This is a temporary situation where we have two versions of
- # checkpointing implementation. We will converge on one and remove the other.
- activation_checkpoint_op: torch._ops.HigherOrderOperator = (
- higher_order_ops.tag_activation_checkpoint
- )
- if torch._functorch.config.functionalize_rng_ops:
- activation_checkpoint_op = higher_order_ops.wrap_activation_checkpoint
- return TorchHigherOrderOperatorVariable.make(
- activation_checkpoint_op,
- **options,
- )
- def is_compile_supported(device_type):
- from .eval_frame import is_dynamo_supported
- compile_supported = is_dynamo_supported()
- if device_type == "cpu":
- pass
- elif device_type == "cuda" and compile_supported:
- compile_supported = has_triton()
- else:
- compile_supported = False
- return compile_supported
- # The following 3.11 source code functions are adapted from
- # https://github.com/python/cpython/blob/v3.11.4/Lib/traceback.py
- # in order to output source code corresponding to bytecode in 3.11+.
- # We need our own versions since we want to support multiline expressions.
- def _fix_offset(str: str, offset: int) -> int:
- """
- Convert byte offset `offset` of `str` into character offset.
- Byte offset is used for 3.11+ instruction column data.
- Takes things like unicode characters into consideration.
- Unchanged from CPython implementation.
- """
- as_utf8 = str.encode("utf-8")
- return len(as_utf8[:offset].decode("utf-8", errors="replace"))
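- # For example: _fix_offset("é = 1", 3) == 2, since "é" occupies 2 bytes in UTF-8
- # but only 1 character.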
- @dataclasses.dataclass
- class _Anchors:
- # inclusive
- left_end_lineno: int
- left_end_offset: int
- right_start_lineno: int
- # exclusive
- right_start_offset: int
- def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]:
- """
- Given source code `segment` corresponding to a bytecode
- instruction, determine:
- - for binary ops, the location of the binary op
- - for indexing, the location of the brackets.
- `segment` is expected to be a valid Python expression
- """
- assert sys.version_info >= (3, 11)
- import ast
- try:
- # Without brackets, `segment` is parsed as a statement.
- # We expect an expression, so wrap `segment` in
- # brackets to handle multi-line expressions.
- tree = ast.parse("(\n" + segment + "\n)")
- except SyntaxError:
- return None
- if len(tree.body) != 1:
- return None
- lines = segment.split("\n")
- # get character index given byte offset
- def normalize(lineno, offset):
- return _fix_offset(lines[lineno], offset)
- # Gets the next valid character index in `lines`, if
- # the current location is not valid. Handles empty lines.
- def next_valid_char(lineno, col):
- while lineno < len(lines) and col >= len(lines[lineno]):
- col = 0
- lineno += 1
- assert lineno < len(lines) and col < len(lines[lineno])
- return lineno, col
- # Get the next valid character index in `lines`.
- def increment(lineno, col):
- col += 1
- lineno, col = next_valid_char(lineno, col)
- assert lineno < len(lines) and col < len(lines[lineno])
- return lineno, col
- # Get the next valid character at least on the next line
- def nextline(lineno, col):
- col = 0
- lineno += 1
- lineno, col = next_valid_char(lineno, col)
- assert lineno < len(lines) and col < len(lines[lineno])
- return lineno, col
- statement = tree.body[0]
- if isinstance(statement, ast.Expr):
- expr = statement.value
- if isinstance(expr, ast.BinOp):
- # ast gives locations for BinOp subexpressions, e.g.
- # ( left_expr ) + ( right_expr )
- # left^^^^^ right^^^^^
- # -2 since end_lineno is 1-indexed and because we added an extra
- # bracket to `segment` when calling ast.parse
- cur_lineno = cast(int, expr.left.end_lineno) - 2
- cur_col = normalize(cur_lineno, expr.left.end_col_offset)
- cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col)
- # Heuristic to find the operator character.
- # The original CPython implementation did not look for ), \, or #,
- # leading to incorrect anchor location, e.g.
- # (x) + (y)
- # ~~^~~~~~~
- while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#":
- if ch in "\\#":
- cur_lineno, cur_col = nextline(cur_lineno, cur_col)
- else:
- cur_lineno, cur_col = increment(cur_lineno, cur_col)
- # binary op is 1 or 2 characters long, on the same line
- right_col = cur_col + 1
- if (
- right_col < len(lines[cur_lineno])
- and not (ch := lines[cur_lineno][right_col]).isspace()
- and ch not in "\\#"
- ):
- right_col += 1
- # right_col can be invalid since it is exclusive
- return _Anchors(cur_lineno, cur_col, cur_lineno, right_col)
- elif isinstance(expr, ast.Subscript):
- # ast gives locations for value and slice subexpressions, e.g.
- # ( value_expr ) [ slice_expr ]
- # value^^^^^ slice^^^^^
- # subscript^^^^^^^^^^^^^^^^^^^^
- # find left bracket (first '[' after value)
- left_lineno = cast(int, expr.value.end_lineno) - 2
- left_col = normalize(left_lineno, expr.value.end_col_offset)
- left_lineno, left_col = next_valid_char(left_lineno, left_col)
- while lines[left_lineno][left_col] != "[":
- left_lineno, left_col = increment(left_lineno, left_col)
- # find right bracket (final character of expression)
- right_lineno = cast(int, expr.end_lineno) - 2
- right_col = normalize(right_lineno, expr.end_col_offset)
- return _Anchors(left_lineno, left_col, right_lineno, right_col)
- elif isinstance(expr, ast.Call):
- # ( func_expr ) (args, kwargs)
- # func^^^^^
- # call^^^^^^^^^^^^^^^^^^^^^^^^
- # find left bracket (first '(' after func)
- left_lineno = cast(int, expr.func.end_lineno) - 2
- left_col = normalize(left_lineno, expr.func.end_col_offset)
- left_lineno, left_col = next_valid_char(left_lineno, left_col)
- while lines[left_lineno][left_col] != "(":
- left_lineno, left_col = increment(left_lineno, left_col)
- # find right bracket (final character of expression)
- right_lineno = cast(int, expr.end_lineno) - 2
- right_col = normalize(right_lineno, expr.end_col_offset)
- return _Anchors(left_lineno, left_col, right_lineno, right_col)
- return None
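- # Illustrative: for a segment like "x + y", the returned anchors isolate the "+"
- # operator; for subscripts and calls they isolate the brackets/parentheses,
- # mirroring the caret underlining in CPython 3.11+ tracebacks.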
- def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str:
- """
- Python 3.11+ only. Returns lines of source code (from code object `code`)
- corresponding to `inst`'s location data, and underlines relevant code to `inst`.
- Example: CALL on `g`:
- f(g(
- ^^
- h(x)))
- ^^^^^
- We need our own implementation since `format_frame_summary` in
- Python's `traceback` module doesn't handle multi-line expressions
- (and their anchor extraction code is not completely correct).
- """
- assert inst.positions is not None
- if inst.positions.lineno is None:
- return ""
- # The rstrip + "\n" pattern is used throughout this function to handle
- # linecache.getline errors. Error lines are treated as empty strings "", but we want
- # to treat them as blank lines "\n".
- first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip()
- if inst.positions.end_lineno is None:
- return first_line
- if inst.positions.col_offset is None or inst.positions.end_col_offset is None:
- return first_line
- # character index of the start of the instruction
- start_offset = _fix_offset(first_line, inst.positions.col_offset)
- # character index of the end of the instruction
- # compute later since end may be a different line
- end_offset = None
- # expression corresponding to the instruction so we can get anchors
- segment = ""
- # underline markers to be printed - start with `~` marker and replace with `^` later
- markers = []
- # Compute segment and initial markers
- if inst.positions.end_lineno == inst.positions.lineno:
- end_offset = _fix_offset(first_line, inst.positions.end_col_offset)
- segment = first_line[start_offset:end_offset]
- markers.append(" " * start_offset + "~" * (end_offset - start_offset))
- else:
- segment = first_line[start_offset:] + "\n"
- markers.append(" " * start_offset + "~" * (len(first_line) - start_offset))
- last_line = linecache.getline(
- code.co_filename, inst.positions.end_lineno
- ).rstrip()
- end_offset = _fix_offset(last_line, inst.positions.end_col_offset)
- for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno):
- line = linecache.getline(code.co_filename, lineno).rstrip()
- segment += line + "\n"
- # don't underline leading spaces
- num_spaces = len(line) - len(line.lstrip())
- markers.append(" " * num_spaces + "~" * (len(line) - num_spaces))
- segment += last_line[:end_offset]
- num_spaces = len(last_line) - len(last_line.lstrip())
- markers.append(" " * num_spaces + "~" * (end_offset - num_spaces))
- anchors: Optional[_Anchors] = None
- try:
- anchors = _extract_anchors_from_expr(segment)
- except AssertionError:
- pass
- # replace `~` markers with `^` where necessary
- if anchors is None:
- markers = [marker.replace("~", "^") for marker in markers]
- else:
- # make markers mutable
- mutable_markers: List[List[str]] = [list(marker) for marker in markers]
- # anchor positions do not take start_offset into account
- if anchors.left_end_lineno == 0:
- anchors.left_end_offset += start_offset
- if anchors.right_start_lineno == 0:
- anchors.right_start_offset += start_offset
- # Turn `~` markers between anchors into `^`
- for lineno in range(len(markers)):
- for col in range(len(mutable_markers[lineno])):
- if lineno < anchors.left_end_lineno:
- continue
- if lineno == anchors.left_end_lineno and col < anchors.left_end_offset:
- continue
- if (
- lineno == anchors.right_start_lineno
- and col >= anchors.right_start_offset
- ):
- continue
- if lineno > anchors.right_start_lineno:
- continue
- if mutable_markers[lineno][col] == "~":
- mutable_markers[lineno][col] = "^"
- # make markers into strings again
- markers = ["".join(marker) for marker in mutable_markers]
- result = ""
- for i in range(len(markers)):
- result += (
- linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip()
- + "\n"
- )
- result += markers[i] + "\n"
- return result
- def get_static_address_type(t):
- if isinstance(t, torch.Tensor):
- return getattr(t, "_dynamo_static_input_type", None)
- return None
- def is_rng_state_getter_or_setter(value):
- getters = (
- # The following two functions are not identical, so don't remove either one!
- torch._C.Generator.get_state,
- torch.default_generator.get_state,
- torch.get_rng_state,
- torch.cuda.get_rng_state,
- )
- setters = (
- torch._C.Generator.set_state,
- torch.default_generator.set_state,
- torch.set_rng_state,
- torch.cuda.set_rng_state,
- )
- return value in (*setters, *getters)
- def is_tensor_base_attr_getter(value):
- return (
- isinstance(value, types.MethodWrapperType)
- and value.__name__ == "__get__"
- and value.__self__.__objclass__ is torch._C._TensorBase # type: ignore[attr-defined]
- )
- def is_torch_function_object(value):
- return hasattr(value, "__torch_function__")
- def has_torch_function(vt: "torch._dynamo.variables.base.VariableTracker") -> bool:
- from torch._dynamo.variables import LazyVariableTracker, UserDefinedObjectVariable
- from torch._dynamo.variables.torch_function import TensorWithTFOverrideVariable
- if isinstance(vt, TensorWithTFOverrideVariable):
- return True
- if isinstance(vt, LazyVariableTracker):
- LazyVariableTracker.realize(vt)
- return isinstance(vt, UserDefinedObjectVariable) and hasattr(
- vt.value, "__torch_function__"
- )
- # see note [Tensor Fakification and Symbol Caching]
- def to_fake_tensor(t, fake_mode):
- symbolic_context = None
- source = None
- if tracing_context := torch._guards.TracingContext.try_get():
- if t in tracing_context.tensor_to_context:
- symbolic_context = tracing_context.tensor_to_context[t]
- source = symbolic_context.tensor_source
- return fake_mode.from_tensor(
- t, static_shapes=False, symbolic_context=symbolic_context, source=source
- )
- def get_first_attr(obj, *attrs):
- """
- Return the first available attribute or throw an exception if none is present.
- """
- for attr in attrs:
- if hasattr(obj, attr):
- return getattr(obj, attr)
- raise AssertionError(f"{obj} does not has any of the attributes: {attrs}")
- @contextlib.contextmanager
- def maybe_enable_compiled_autograd(should_enable):
- def compiler_fn(gm):
- def inner_compiler(gm_, example_inputs_):
- torch._dynamo.utils.counters["compiled_autograd"]["compiles"] += 1
- return torch._inductor.compile(gm_, example_inputs_)
- return torch.compile(gm, backend=inner_compiler, fullgraph=True, dynamic=True)
- if should_enable:
- with torch._dynamo.compiled_autograd.enable(compiler_fn) as ctx:
- yield ctx
- else:
- yield
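- # Usage sketch (illustrative):
- #   with maybe_enable_compiled_autograd(should_enable=True):
- #       loss.backward()  # the autograd graph is captured and compiled via inductor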
- def invalid_removeable_handle():
- # need a subclass so weakref works
- class Invalid(dict): # type: ignore[type-arg]
- pass
- return RemovableHandle(Invalid())
- # Returns a "proxy" (new object with the same class and dict) for (non-GraphModule) nn.Module's.
- # Attribute changes to the original object/proxy will be reflected in the other.
- # This is useful for cases where we want a keep-alive reference to a module without increasing
- # its reference count.
- def nn_module_proxy(mod):
- if not isinstance(mod, torch.nn.Module):
- return mod
- if isinstance(mod, torch.fx.GraphModule):
- # Dynamo-generated GM's shouldn't contain user-created GM's
- return mod
- proxy = mod.__class__.__new__(mod.__class__)
- proxy.__dict__ = mod.__dict__
- return proxy
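- # Illustrative: proxy = nn_module_proxy(mod) yields a distinct object, yet
- # setattr(proxy, "attr", v) is visible as mod.attr because the two share __dict__.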
- class GmWrapper(torch.nn.Module):
- def __init__(self, gm, spec):
- super().__init__()
- self.gm = gm
- self.spec = spec
- def forward(self, *args):
- args: List[Any] = list(args)
- return self.gm(*pytree.tree_unflatten(args, self.spec))
- def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
- """
- Mutate inputs so that they are flat, and wrap gm so that it
- accepts those flat inputs. This is needed for graphs that take
- structured ("bumpy") inputs.
- """
- inputs, spec = pytree.tree_flatten(inputs)
- compiled_fn = compile_gm(GmWrapper(gm, spec), inputs)
- idx_to_steal = [
- i
- for i, node in enumerate(gm.graph.nodes)
- if node.op == "placeholder" and node.meta.get("steal_arg", False)
- ]
- def wrapper(*args):
- # note this doesn't check the spec, assuming it is the same
- flat_args = pytree.arg_tree_leaves(*args)
- # flat_args is a new list, so we need to clear references from the old list
- for i in idx_to_steal:
- args[i].clear()
- # this call is boxed to avoid increasing refcount until we reach aot_module_simplified forward
- return compiled_fn(flat_args)
- return wrapper
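- # Sketch of the flow (illustrative): if the graph takes nested inputs such as
- # ([a, b], {"c": c}), compile_gm sees a GmWrapper over the flat list [a, b, c];
- # the returned wrapper re-flattens the caller's args (clearing ones marked for
- # stealing) and passes them to the compiled function as a single boxed list.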
- def get_locals_to_steal(maybe_gm):
- if not isinstance(maybe_gm, torch.fx.GraphModule) or not hasattr(maybe_gm, "meta"):
- return []
- return maybe_gm.meta.get("locals_to_steal", [])
- def set_locals_to_steal(gm, locals_to_steal):
- gm.meta["locals_to_steal"] = locals_to_steal
- class Lit:
- def __init__(self, s):
- self.s = s
- def __repr__(self):
- return self.s
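- # Illustrative: repr(Lit("torch.float32")) == "torch.float32" (no quotes), which is
- # useful when embedding code fragments verbatim into generated reprs.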
- warn_once_cache: Set[str] = set()
- def warn_once(msg, stacklevel=1):
- # Dynamo causes all warnings.warn (in user code and in Dynamo code) to print all the time.
- # https://github.com/pytorch/pytorch/issues/128427.
- # warn_once is a workaround: if the msg has been warned on before, then we will not
- # warn again.
- # NB: it's totally ok to store a cache of all the strings: this is what warnings.warn does as well.
- if msg in warn_once_cache:
- return
- warn_once_cache.add(msg)
- warnings.warn(msg, stacklevel=stacklevel + 1)