_trace.py

  1. # mypy: allow-untyped-defs
  2. import dataclasses
  3. import functools
  4. import inspect
  5. import logging
  6. import re
  7. import time
  8. import warnings
  9. from contextlib import contextmanager, nullcontext
  10. from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
  11. import torch
  12. import torch._dynamo
  13. import torch.fx
  14. import torch.utils._pytree as pytree
  15. from torch._dynamo.exc import UserError, UserErrorType
  16. from torch._export.non_strict_utils import (
  17. _fakify_script_objects,
  18. _gather_constant_attrs,
  19. make_constraints,
  20. make_fake_inputs,
  21. make_fake_params_buffers,
  22. produce_guards_and_solve_constraints,
  23. )
  24. from torch._export.passes._node_metadata_hook import (
  25. _node_metadata_hook,
  26. _set_node_metadata_hook,
  27. )
  28. from torch._export.passes.add_runtime_assertions_for_constraints_pass import (
  29. _AddRuntimeAssertionsForInlineConstraintsPass,
  30. )
  31. from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass
  32. from torch._export.passes.lift_constants_pass import (
  33. ConstantAttrMap,
  34. lift_constants_pass,
  35. rewrite_script_object_meta,
  36. )
  37. from torch._export.utils import placeholder_naming_pass, placeholder_prefixes
  38. from torch._export.verifier import SpecViolationError
  39. from torch._export.wrappers import _wrap_submodules
  40. from torch._functorch.aot_autograd import aot_export_module
  41. from torch._guards import detect_fake_mode
  42. from torch._library.fake_class_registry import FakeScriptObject
  43. from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
  44. from torch._utils_internal import log_export_usage
  45. from torch.export.dynamic_shapes import _combine_args
  46. from torch.export.exported_program import OutputKind
  47. from torch.fx._utils import first_call_function_nn_module_stack
  48. from torch.fx.experimental.symbolic_shapes import (
  49. ConstraintViolationError,
  50. free_unbacked_symbols,
  51. GuardOnDataDependentSymNode,
  52. ShapeEnv,
  53. )
  54. from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
  55. from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts
  56. from torch.utils._pytree import TreeSpec
  57. from torch.utils._sympy.value_ranges import ValueRangeError
  58. from ._safeguard import AutogradStateOpsFailSafeguard
  59. from .exported_program import (
  60. _disable_prexisiting_fake_mode,
  61. ExportedProgram,
  62. InputKind,
  63. ModuleCallEntry,
  64. ModuleCallSignature,
  65. )
  66. from .graph_signature import (
  67. _sig_to_specs,
  68. ArgumentSpec,
  69. ConstantArgument,
  70. CustomObjArgument,
  71. ExportGraphSignature,
  72. SymIntArgument,
  73. TensorArgument,
  74. TokenArgument,
  75. )
  76. log = logging.getLogger(__name__)
  77. @dataclasses.dataclass
  78. class ExportDynamoConfig:
  79. """
  80. Manage Export-specific configurations of Dynamo.
  81. """
  82. allow_rnn: bool = True
  83. reorderable_logging_functions: Set[Callable] = dataclasses.field(
  84. default_factory=set
  85. )
  86. @dataclasses.dataclass
  87. class ExportedArtifact:
  88. gm: torch.fx.GraphModule
  89. sig: ExportGraphSignature
  90. constants: Dict[
  91. str,
  92. Union[
  93. torch.Tensor,
  94. FakeScriptObject,
  95. torch.ScriptObject,
  96. ],
  97. ]
98. out_spec: Optional[TreeSpec] = None  # type: ignore[assignment]
99. fake_mode: Optional[FakeTensorMode] = None  # type: ignore[assignment]
100. module_call_specs: Optional[Dict[str, Dict[str, pytree.TreeSpec]]] = None  # type: ignore[assignment]
  101. DEFAULT_EXPORT_DYNAMO_CONFIG = ExportDynamoConfig()
  102. DEFAULT_EXPORT_DYNAMO_CONFIG.reorderable_logging_functions = {
  103. logging.critical,
  104. logging.debug,
  105. logging.error,
  106. logging.exception,
  107. logging.info,
  108. logging.log,
  109. logging.warning,
  110. print,
  111. warnings.warn,
  112. }
  113. @contextmanager
  114. def _ignore_backend_decomps():
  115. orig_mkldnn_flag = torch.backends.mkldnn.set_flags(False)
  116. orig_nnpack_flag = torch.backends.nnpack.set_flags(False)
  117. try:
  118. yield
  119. finally:
  120. torch.backends.mkldnn.set_flags(*orig_mkldnn_flag)
  121. torch.backends.nnpack.set_flags(*orig_nnpack_flag)
  122. def _fixup_key(x):
  123. return "L__self__" + _strip_root(x)
  124. def _strip_root(x):
  125. if isinstance(x, str) and x.startswith("_export_root"):
  126. stripped = x[len("_export_root") :]
  127. return stripped[1:] if stripped.startswith(".") else stripped
  128. return x
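# Illustrative note for the two helpers above (the key strings are hypothetical
# examples, not taken from a real trace):
#   _strip_root("_export_root.sub.linear") -> "sub.linear"
#   _strip_root("weight")                  -> "weight"      (no prefix, unchanged)
#   _fixup_key("_export_root.sub.linear")  -> "L__self__sub.linear"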
  129. def _add_runtime_assertions_to_cond_in_subgraph(range_constraints, gm, fake_mode):
  130. # We can't get rid of this yet, since for some reason
131. # insert_deferred_runtime_asserts doesn't add assertions to cond
  132. # subgraphs
  133. if len(range_constraints) > 0:
  134. stack_trace = (
  135. 'File "torch/_export/passes/add_runtime_assertions_for_constraints_pass.py", line 46, '
  136. "in _AddRuntimeAssertionsForInlineConstraintsPass"
  137. )
  138. with fake_mode, _set_node_metadata_hook(
  139. gm, functools.partial(_node_metadata_hook, stack_trace=stack_trace)
  140. ):
  141. res = _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints)(gm)
  142. assert res is not None
  143. gm = res.graph_module
  144. def _rewrite_node(gm):
  145. for node in gm.graph.nodes:
  146. if node.target == torch.ops.higher_order._export_tracepoint:
  147. if "path" in node.kwargs:
  148. path = _strip_root(node.kwargs["path"])
  149. with gm.graph.inserting_before(node):
  150. new_node = gm.graph.create_node(
  151. "call_function",
  152. torch.ops.higher_order._export_tracepoint,
  153. args=node.args,
  154. kwargs={
  155. "path": path,
  156. "kind": node.kwargs["kind"],
  157. },
  158. )
  159. new_node.meta = node.meta
  160. node.replace_all_uses_with(new_node)
  161. gm.graph.erase_node(node)
  162. def _convert_input_to_fake(gm, args, kwargs):
  163. params_buffers = _get_params_buffers(gm)
  164. fake_inps: List[torch.Tensor] = []
  165. for node in gm.graph.nodes:
  166. if node.op == "placeholder" and "val" in node.meta:
  167. fake_val = node.meta["val"]
  168. if fake_val is not None and isinstance(fake_val, torch.Tensor):
  169. fake_inps.append(fake_val)
  170. if detected_fake_mode := detect_fake_mode(fake_inps):
  171. fake_mode = detected_fake_mode
  172. else:
  173. fake_mode = FakeTensorMode(shape_env=ShapeEnv(), export=True)
  174. if len(args) == 0 and len(kwargs) == 0:
  175. return (), {}, params_buffers, fake_mode
  176. count = 0
  177. def convert_to_fake(x):
  178. nonlocal count
  179. val = fake_inps[count]
  180. count += 1
  181. return val
  182. fake_args = pytree.tree_map_only(torch.Tensor, convert_to_fake, args)
  183. # TODO properly use the cached fake tensor
  184. fake_kwargs = pytree.tree_map_only(torch.Tensor, fake_mode.from_tensor, kwargs)
  185. fake_params_buffers = pytree.tree_map_only(
  186. torch.Tensor,
  187. functools.partial(fake_mode.from_tensor, static_shapes=True),
  188. params_buffers,
  189. )
  190. return fake_args, fake_kwargs, fake_params_buffers, fake_mode
  191. def _replace_param_buffer_names(param_buffer_table, sig):
  192. for spec in sig.input_specs:
  193. if spec.kind in (
  194. InputKind.PARAMETER,
  195. InputKind.BUFFER,
  196. ):
  197. spec.target = param_buffer_table[spec.target]
  198. for spec in sig.output_specs:
  199. if spec.kind in (
  200. OutputKind.BUFFER_MUTATION,
  201. OutputKind.GRADIENT_TO_PARAMETER,
  202. ):
  203. spec.target = param_buffer_table[spec.target]
  204. def _convert_to_positional_args(orig_arg_names, args, kwargs):
  205. assert len(orig_arg_names) == len(args) + len(kwargs), (
  206. f"Total number of arg names is expected to be {len(orig_arg_names)} "
  207. f"but got {len(args)} positional args, {len(kwargs)} kwargs."
  208. )
  209. reordered_kwargs = [kwargs[kw_name] for kw_name in orig_arg_names[len(args) :]]
  210. return (
  211. *args,
  212. *reordered_kwargs,
  213. )
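# Illustrative sketch of the reordering above (argument names and values are
# hypothetical): with orig_arg_names = ["x", "y", "z"], args = (1,) and
# kwargs = {"z": 3, "y": 2}, the helper returns (1, 2, 3) -- kwargs are
# appended in the order of the original signature, not the caller's order.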
  214. def _normalize_nn_module_stack(gm_torch_level, root_cls):
  215. # Append a root module to every nn_module_stack.
  216. root = "L['self']"
  217. root_key = re.sub(r"[^a-zA-Z0-9]", "_", root)
  218. for gm in gm_torch_level.modules():
  219. if not isinstance(gm, torch.fx.GraphModule):
  220. continue
  221. for node in gm.graph.nodes:
  222. if node.op in ["placeholder", "output"]:
  223. continue
  224. add_root = True
  225. if nn_module_stack := node.meta.get("nn_module_stack", {}):
  226. path, ty = next(iter(nn_module_stack.values()))
  227. # After deserializing the class `ty` might not exist anymore so
  228. # it could be a string
  229. if inspect.isclass(ty) and issubclass(ty, torch.nn.Module):
230. # TODO Figure out why sometimes we have root and sometimes we don't.
  231. if path == root and ty is root_cls:
  232. add_root = False
  233. else:
  234. assert isinstance(ty, str)
  235. if add_root:
  236. def normalize_path(path):
  237. try:
  238. parts = []
  239. class Path:
  240. def __getattr__(self, name):
  241. parts.append(name)
  242. return self
  243. def __getitem__(self, idx):
  244. parts.append(str(idx))
  245. return self
  246. eval(path, {"L": {"self": Path()}})
  247. return ".".join(parts)
  248. except Exception: # TODO(zhxchen17) Remove this.
  249. return path
  250. nn_module_stack = {
  251. root_key: (root, root_cls.__module__ + "." + root_cls.__qualname__),
  252. **nn_module_stack,
  253. }
  254. node.meta["nn_module_stack"] = {
  255. key: (normalize_path(path), ty)
  256. for key, (path, ty) in nn_module_stack.items()
  257. }
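# Illustrative note on `normalize_path` above (the access path is a hypothetical
# Dynamo-style string): evaluating "L['self'].blocks[0].attn" against the dummy
# `Path` object records each attribute/index access, producing the dotted FQN
# "blocks.0.attn".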
  258. def _get_param_buffer_mapping(
  259. original_module: torch.nn.Module,
  260. traced_module: torch.nn.Module,
  261. ) -> Dict[str, str]:
  262. """
263. Returns a mapping of parameter/buffer names from the traced module to the
264. original module. This helps restore the FQNs of the traced module's
265. parameters/buffers to those used by the original module.
  266. """
  267. param_lookup: Dict[int, List[str]] = {}
  268. buffer_lookup: Dict[int, List[str]] = {}
  269. for name, param in original_module.named_parameters(remove_duplicate=False):
  270. param_lookup.setdefault(id(param), []).append(name)
  271. for name, buffer in original_module.named_buffers(remove_duplicate=False):
  272. buffer_lookup.setdefault(id(buffer), []).append(name)
  273. # reverse lists so FQN assignment is FIFO wrt model structure
274. for param_id, fqns in param_lookup.items():
275. param_lookup[param_id] = fqns[::-1]
276. for buffer_id, fqns in buffer_lookup.items():
277. buffer_lookup[buffer_id] = fqns[::-1]
  278. param_buffer_table: Dict[str, str] = {}
  279. for dynamo_name, dynamo_param in traced_module.named_parameters(
  280. remove_duplicate=False
  281. ):
  282. assert dynamo_name not in param_buffer_table
  283. if id(dynamo_param) in param_lookup:
  284. param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)].pop()
  285. for dynamo_name, dynamo_buffer in traced_module.named_buffers(
  286. remove_duplicate=False
  287. ):
  288. assert dynamo_name not in param_buffer_table
  289. if id(dynamo_buffer) in buffer_lookup:
  290. param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)].pop()
  291. return param_buffer_table
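# Minimal sketch of the resulting mapping (attribute names are hypothetical,
# and the traced-side name follows the flat naming Dynamo typically produces):
#   original: mod.linear.weight           (FQN "linear.weight")
#   traced:   gm.L__self___linear_weight  (same tensor object)
#   mapping:  {"L__self___linear_weight": "linear.weight"}
# Matching is done by object identity (id), so duplicated/shared parameters are
# paired with their FQNs in model-definition order.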
  292. def _remap_constants(
  293. orig_constant_attrs: ConstantAttrMap,
  294. graph_signature: ExportGraphSignature,
  295. constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]],
  296. ) -> None:
  297. """Rewrite the graph signature and constants table to use the FQN from the original module."""
  298. remap_table: Dict[str, List[str]] = {}
  299. for name, value in constants.items():
  300. if value in orig_constant_attrs:
  301. remap_table[name] = orig_constant_attrs[value]
  302. for spec in graph_signature.input_specs:
  303. if spec.kind in (
  304. InputKind.CONSTANT_TENSOR,
  305. InputKind.CUSTOM_OBJ,
  306. ):
  307. orig_target = spec.target
  308. assert orig_target is not None
  309. targets = remap_table.get(orig_target, [orig_target])
  310. spec.target = targets[0]
  311. constant = constants[orig_target]
  312. del constants[orig_target]
  313. for target in targets:
  314. constants[target] = constant
  315. def _rename_constants_nodes(
  316. gm: torch.fx.GraphModule,
  317. graph_signature: ExportGraphSignature,
  318. ) -> None:
  319. """
  320. For strict mode, rename constants nodes that were previously annotated as buffers.
  321. """
  322. # handle name collisions with existing constants
  323. node_names = {node.name for node in gm.graph.nodes}
  324. def rename_constant(name):
  325. if name in node_names:
  326. n = 1
  327. while (dup_name := f"{name}_{n}") in node_names:
  328. n += 1
  329. name = dup_name
  330. node_names.add(name)
  331. return name
  332. # use input specs to map names from buffers to constants
  333. buffer_prefix = placeholder_prefixes[InputKind.BUFFER]
  334. const_prefix = placeholder_prefixes[InputKind.CONSTANT_TENSOR]
  335. buffer_to_constant = {}
  336. for spec in graph_signature.input_specs:
  337. if spec.kind == InputKind.CONSTANT_TENSOR and not spec.arg.name.startswith(
  338. const_prefix
  339. ):
  340. if spec.arg.name.startswith(buffer_prefix): # map from buffer to constants
  341. c_name = rename_constant(
  342. const_prefix + spec.arg.name[len(buffer_prefix) :]
  343. )
  344. else: # lifted constant
  345. c_name = rename_constant(const_prefix + spec.arg.name)
  346. buffer_to_constant[spec.arg.name] = c_name
  347. spec.arg.name = c_name
  348. for spec in graph_signature.output_specs:
  349. if spec.arg.name in buffer_to_constant:
  350. spec.arg.name = buffer_to_constant[spec.arg.name]
  351. # Rename constants nodes for all modules
  352. for mod in gm.modules():
  353. if not isinstance(mod, torch.fx.GraphModule):
  354. continue
  355. for node in mod.graph.nodes:
  356. if node.name in buffer_to_constant:
  357. node.name = node.target = buffer_to_constant[node.name]
  358. mod.recompile()
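# Illustrative note (placeholder names are hypothetical, assuming the usual
# "b_"/"c_" prefixes from placeholder_prefixes): a placeholder Dynamo lifted as
# a buffer, e.g. "b_foo", whose spec is now CONSTANT_TENSOR is renamed to
# "c_foo"; if that name already exists in the graph, a numeric suffix is
# appended ("c_foo_1", "c_foo_2", ...).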
  359. def _restore_state_dict(
  360. original_module: torch.nn.Module, traced_module: torch.fx.GraphModule
  361. ) -> None:
  362. """
  363. Restores the state dict of the traced module to that of the original module.
  364. """
  365. param_buffer_table = _get_param_buffer_mapping(original_module, traced_module)
366. # Since the graph module is flattened (no module hierarchy), we
367. # need to normalize the FQNs by replacing "." with "_". If we
368. # don't, it will try to save the weight to a submodule which no
369. # longer exists.
  370. for name, fqn in param_buffer_table.items():
  371. param_buffer_table[name] = fqn.replace(".", "_")
  372. # Replace state dict attr names with the fqn
  373. for name, fqn in param_buffer_table.items():
  374. if not hasattr(traced_module, name):
  375. continue
  376. attr = getattr(traced_module, name)
  377. if isinstance(attr, torch.Tensor) and not isinstance(attr, torch.nn.Parameter):
  378. traced_module.register_buffer(fqn, attr)
  379. else:
  380. setattr(traced_module, fqn, attr)
  381. delattr(traced_module, name)
  382. # Replace graph getattr nodes with the correct name
  383. for node in traced_module.graph.nodes:
  384. if node.op == "get_attr":
  385. attr_name = node.target
  386. if attr_name in param_buffer_table:
  387. node.target = param_buffer_table[attr_name]
  388. traced_module.recompile()
  389. def _get_module_hierarchy(mod: torch.nn.Module) -> Dict[str, str]:
  390. return {
  391. name: type(m).__name__ for name, m in mod.named_modules(remove_duplicate=False)
  392. }
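# Illustrative sketch (module structure is hypothetical): for a model whose only
# child is `self.linear = torch.nn.Linear(2, 2)`, the helper above returns
#   {"": "MyModel", "linear": "Linear"}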
  393. def _make_module_call_graph(
  394. module_hierarchy: Dict[str, str],
  395. in_spec: TreeSpec,
  396. out_spec: TreeSpec,
  397. module_call_signatures: Dict[str, ModuleCallSignature],
  398. ) -> List[ModuleCallEntry]:
  399. ret = [
  400. ModuleCallEntry(fqn=fqn, signature=module_call_signatures.get(fqn))
  401. for fqn in module_hierarchy
  402. ]
  403. assert ret[0].fqn == ""
  404. ret[0].signature = ModuleCallSignature(
  405. inputs=[], outputs=[], in_spec=in_spec, out_spec=out_spec
  406. )
  407. return ret
  408. def _export_to_torch_ir(
  409. f: Callable,
  410. args: Tuple[Any, ...],
  411. kwargs: Optional[Dict[str, Any]] = None,
  412. dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
  413. *,
  414. preserve_module_call_signature: Tuple[str, ...] = (),
  415. disable_constraint_solver: bool = False,
  416. _allow_complex_guards_as_runtime_asserts: bool = False,
  417. restore_fqn: bool = True,
  418. _log_export_usage: bool = True,
  419. same_signature: bool = True,
  420. ) -> torch.fx.GraphModule:
  421. """
  422. Traces either an nn.Module's forward function or just a callable with PyTorch
423. operations inside, and produces a torch.fx.GraphModule in torch IR.
  424. """
  425. if _log_export_usage:
  426. log_export_usage(event="export.private_api", flags={"_export_to_torch_ir"})
  427. if not isinstance(args, tuple):
  428. raise UserError(
  429. UserErrorType.INVALID_INPUT,
  430. f"Expecting `args` to be a tuple of example positional inputs, got {type(args)}",
  431. )
  432. kwargs = kwargs or {}
  433. with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)):
  434. try:
  435. module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {}
  436. with _wrap_submodules(
  437. f, preserve_module_call_signature, module_call_specs
  438. ), _ignore_backend_decomps():
  439. gm_torch_level, _ = torch._dynamo.export(
  440. f,
  441. dynamic_shapes=dynamic_shapes, # type: ignore[arg-type]
  442. assume_static_by_default=True,
  443. tracing_mode="symbolic",
  444. disable_constraint_solver=disable_constraint_solver,
  445. # currently the following 2 flags are tied together for export purposes,
446. # but untangled for the sake of the dynamo export API
  447. prefer_deferred_runtime_asserts_over_guards=_allow_complex_guards_as_runtime_asserts,
  448. _allow_complex_guards_as_runtime_asserts=_allow_complex_guards_as_runtime_asserts,
  449. _log_export_usage=_log_export_usage,
  450. same_signature=same_signature,
  451. )(
  452. *args,
  453. **kwargs,
  454. )
  455. except (ConstraintViolationError, ValueRangeError) as e:
  456. raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: B904
  457. except GuardOnDataDependentSymNode as e:
  458. raise UserError( # noqa: B904
  459. UserErrorType.ANTI_PATTERN,
  460. f"Consider annotating your code using torch._check*(). {str(e)}",
  461. case_name="constrain_as_size_example",
  462. )
  463. gm_torch_level.meta["module_call_specs"] = module_call_specs
  464. if isinstance(f, torch.nn.Module) and restore_fqn:
  465. _restore_state_dict(f, gm_torch_level)
  466. return gm_torch_level
  467. def _export_to_aten_ir(
  468. mod: torch.nn.Module,
  469. fake_args,
  470. fake_kwargs,
  471. fake_params_buffers,
  472. constant_attrs: ConstantAttrMap,
  473. *,
  474. transform=lambda x: x, # TODO(zhxchen17) Revisit if this is needed later.
  475. pre_dispatch=False,
  476. _is_torch_jit_trace=False,
  477. ):
478. # [NOTE] If the user is exporting under training mode, we want to detect any change
479. # in the global autograd state and raise an error. If the user is exporting under inference
480. # mode, we don't care. At the pre-dispatch level, we don't care about the state change either.
  481. is_grad_enabled = torch._C.is_grad_enabled()
  482. grad_safe_guard = nullcontext()
  483. if not pre_dispatch and is_grad_enabled:
  484. grad_safe_guard = AutogradStateOpsFailSafeguard() # type: ignore[assignment]
  485. @contextmanager
  486. def _compiling_state_context():
  487. old_value = torch.compiler._is_compiling_flag
  488. try:
  489. torch.compiler._is_compiling_flag = True
  490. yield
  491. finally:
  492. torch.compiler._is_compiling_flag = old_value
  493. # This _reparametrize_module makes sure inputs and module.params/buffers have the same fake_mode,
  494. # otherwise aot_export_module will error out because it sees a mix of fake_modes.
  495. # And we want aot_export_module to use the fake_tensor mode in dynamo to keep the pipeline easy to reason about.
  496. with torch.nn.utils.stateless._reparametrize_module(
  497. mod,
  498. fake_params_buffers,
  499. tie_weights=True,
  500. strict=True,
  501. stack_weights=True,
  502. ), grad_safe_guard, _ignore_backend_decomps(), _compiling_state_context(): # type: ignore[attr-defined]
  503. gm, graph_signature = transform(aot_export_module)(
  504. mod,
  505. fake_args,
  506. trace_joint=False,
  507. pre_dispatch=pre_dispatch,
  508. kwargs=fake_kwargs,
  509. )
  510. # TODO unfortunately preserving graph-level metadata is not
  511. # working well with aot_export. So we manually copy it.
  512. # (The node-level meta is addressed above.)
  513. if isinstance(mod, torch.fx.GraphModule) and hasattr(mod, "meta"):
  514. gm.meta.update(mod.meta)
  515. def make_argument_spec(i, node) -> ArgumentSpec:
  516. if isinstance(node, (int, bool, float, type(None))):
  517. # For const outputs we just directly return this
  518. return ConstantArgument(name="", value=node)
  519. assert (
  520. "val" in node.meta
  521. ), f"{node} is not a constant or a node with a 'val' metadata field"
  522. val = node.meta["val"]
  523. if i < len(graph_signature.input_tokens):
  524. # TODO: We should be checking for a different type, once we add a new type
  525. return TokenArgument(name=node.name)
  526. elif isinstance(val, FakeTensor):
  527. return TensorArgument(name=node.name)
  528. elif isinstance(val, torch.SymInt):
  529. return SymIntArgument(name=node.name)
  530. elif isinstance(val, torch.ScriptObject):
  531. return CustomObjArgument(name=node.name, class_fqn=val._type().qualified_name()) # type: ignore[attr-defined]
  532. elif isinstance(val, FakeScriptObject):
  533. return CustomObjArgument(name=node.name, class_fqn=val.script_class_name)
  534. elif isinstance(val, (int, bool, str, float, type(None))):
  535. return ConstantArgument(name=node.name, value=val)
  536. else:
  537. raise AssertionError(
  538. f"Encountered an unsupported object of type {type(val)} "
  539. f"while writing the metadata for exported program"
  540. )
  541. is_joint = graph_signature.backward_signature is not None
  542. # NOTE: aot_export adds symint metadata for placeholders with int values;
  543. # since these become specialized, we replace such metadata with the original values
  544. flat_args = pytree.tree_leaves((fake_args, fake_kwargs))
  545. index = 0
  546. total_non_user_inputs = (
  547. len(graph_signature.parameters)
  548. + len(graph_signature.buffers)
  549. + len(graph_signature.input_tokens)
  550. )
  551. for node in gm.graph.nodes:
  552. if node.op == "placeholder":
  553. if index >= total_non_user_inputs:
  554. user_arg = flat_args[index - total_non_user_inputs]
  555. if not isinstance(user_arg, torch.Tensor):
  556. node.meta["val"] = user_arg
  557. index += 1
  558. input_specs, output_specs = _sig_to_specs(
  559. user_inputs=set(graph_signature.user_inputs),
  560. inputs_to_parameters=graph_signature.inputs_to_parameters, # type: ignore[arg-type]
  561. inputs_to_buffers=graph_signature.inputs_to_buffers, # type: ignore[arg-type]
  562. user_outputs=set(graph_signature.user_outputs), # type: ignore[arg-type]
  563. buffer_mutations=graph_signature.buffers_to_mutate, # type: ignore[arg-type]
  564. user_input_mutations=graph_signature.user_inputs_to_mutate, # type: ignore[arg-type]
  565. grad_params=graph_signature.backward_signature.gradients_to_parameters if is_joint else {}, # type: ignore[arg-type, union-attr]
  566. grad_user_inputs=graph_signature.backward_signature.gradients_to_user_inputs if is_joint else {}, # type: ignore[arg-type, union-attr]
  567. loss_output=graph_signature.backward_signature.loss_output if is_joint else None, # type: ignore[arg-type, union-attr]
  568. inputs=[
  569. make_argument_spec(i, node)
  570. for i, node in enumerate(gm.graph.nodes)
  571. if node.op == "placeholder"
  572. ],
  573. outputs=[
  574. make_argument_spec(i, node)
  575. for i, node in enumerate(
  576. pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args)
  577. )
  578. ],
  579. input_tokens=graph_signature.input_tokens,
  580. output_tokens=graph_signature.output_tokens,
  581. )
  582. export_graph_signature = ExportGraphSignature(
  583. input_specs=input_specs, output_specs=output_specs
  584. )
  585. from torch._guards import detect_fake_mode
  586. fake_mode = detect_fake_mode(flat_args)
  587. from torch._dynamo import config as _dynamo_config
  588. if not _dynamo_config.do_not_emit_runtime_asserts:
  589. stack_trace = (
  590. 'File "torch/fx/passes/runtime_assert.py", line 24, '
  591. "in insert_deferred_runtime_asserts"
  592. )
  593. with _set_node_metadata_hook(
  594. gm, functools.partial(_node_metadata_hook, stack_trace=stack_trace)
  595. ):
  596. insert_deferred_runtime_asserts(
  597. gm,
  598. fake_mode.shape_env,
  599. f"exported program: {first_call_function_nn_module_stack(gm.graph)}",
  600. export=True,
  601. )
  602. if pre_dispatch:
  603. from torch._export.passes.replace_set_grad_with_hop_pass import (
  604. replace_set_grad_with_hop_pass,
  605. )
  606. gm = replace_set_grad_with_hop_pass(gm, export_graph_signature)
607. # Remove nn_module_stack, stack_trace metadata from all placeholder/output nodes.
  608. for _mod in gm.modules():
  609. if not isinstance(_mod, torch.fx.GraphModule):
  610. continue
  611. for node in _mod.graph.nodes:
  612. if node.op in ["placeholder", "output"]:
  613. node.meta.pop("nn_module_stack", None)
  614. node.meta.pop("stack_trace", None)
  615. constants = rewrite_script_object_meta(gm)
  616. constants.update(lift_constants_pass(gm, export_graph_signature, constant_attrs))
  617. # Prettify names for placeholder nodes.
  618. placeholder_naming_pass(
  619. gm,
  620. export_graph_signature,
  621. mod,
  622. fake_args,
  623. fake_kwargs,
  624. fake_params_buffers,
  625. constants,
  626. )
  627. return ExportedArtifact(
  628. gm,
  629. export_graph_signature,
  630. constants,
  631. )
  632. def _get_params_buffers(mod: torch.nn.Module) -> Dict[str, torch.Tensor]:
  633. params_buffers: Dict[str, torch.Tensor] = {}
  634. for name, param in mod.named_parameters(remove_duplicate=False):
  635. params_buffers[name] = param
  636. for name, buffer in mod.named_buffers(remove_duplicate=False):
  637. params_buffers[name] = buffer
  638. return params_buffers
  639. def _get_forward_arg_names(
  640. mod: torch.nn.Module,
  641. args: Tuple[Any, ...],
  642. kwargs: Optional[Dict[str, Any]] = None,
  643. ) -> List[str]:
  644. """
  645. Gets the argument names to forward that are used, for restoring the
  646. original signature when unlifting the exported program module.
  647. - Positional args: retain the original argument names, and enumerate
  648. *args as args_0, args_1, ...
  649. - Keyword args: retain the original kwarg names in the order specified
  650. by the user. This order seems to matter for the current state of
  651. export lifted modules.
  652. """
  653. sig = inspect.signature(mod.forward)
  654. _args = sig.bind_partial(*args).arguments
  655. names: List[str] = []
  656. for name, value in _args.items():
  657. # handle variable number of positional args
  658. if sig.parameters[name].kind == inspect._ParameterKind.VAR_POSITIONAL:
  659. names.extend([f"{name}_{i}" for i, _ in enumerate(value)])
  660. else:
  661. names.append(name)
  662. # order of kwargs matters for input spec
  663. if kwargs:
  664. names.extend([kwarg for kwarg, _ in kwargs.items()])
  665. return names
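# Illustrative example of the naming scheme above (the forward signature and
# inputs are hypothetical): for `def forward(self, x, *extra, scale=1.0)`
# called with args = (t0, t1, t2) and kwargs = {"scale": 2.0}, the returned
# names are ["x", "extra_0", "extra_1", "scale"].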
  666. def _rewrite_dynamo_tensor_constants(
  667. orig_mod_buffers: Set[torch.Tensor],
  668. traced_mod_buffers: Dict[str, torch.Tensor],
  669. graph_signature: ExportGraphSignature,
  670. constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]],
  671. ):
  672. """Dynamo erroneously marks tensor attributes on modules as a buffers.
  673. Rewrite them to be tensor constants.
  674. """
  675. for spec in graph_signature.input_specs:
  676. if spec.kind == InputKind.BUFFER:
  677. assert spec.target is not None
  678. value = traced_mod_buffers[spec.target]
  679. if value not in orig_mod_buffers:
  680. # This was a tensor constant erroneously marked as a buffer.
681. # Convert it into a constant in the graph signature, and add its
  682. # value to the constants table.
  683. spec.kind = InputKind.CONSTANT_TENSOR
  684. constants[spec.target] = value
  685. def _rewrite_non_persistent_buffers(
  686. orig_mod: torch.nn.Module,
  687. graph_signature: ExportGraphSignature,
  688. constants: Dict[str, Union[torch.Tensor, torch.ScriptObject]],
  689. ):
  690. """Dynamo erroneously drops the persistent flag on buffers.
  691. Rewrite non-persistent buffers to reflect the original module.
  692. """
  693. state_dict = orig_mod.state_dict()
  694. for spec in graph_signature.input_specs:
  695. if spec.kind == InputKind.BUFFER:
  696. assert spec.target is not None
  697. if spec.target not in state_dict:
  698. assert spec.target not in constants
  699. spec.persistent = False
  700. constants[spec.target] = orig_mod.get_buffer(spec.target)
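# Illustrative note (the buffer name is hypothetical): a buffer registered via
# `self.register_buffer("cache", t, persistent=False)` is absent from
# orig_mod.state_dict(), so its input spec is marked persistent=False and its
# value is recorded in the constants table under "cache".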
  701. def _verify_nn_module_stack(graph_module: torch.fx.GraphModule) -> None:
  702. """
  703. Perform nn_module_stack checks on the graph.
  704. Current constraints:
  705. For the top level graph:
  706. - populated for 'call_function', 'get_attr'
  707. - None for 'placeholder', 'output'
  708. For submodule graphs:
709. - None for 'placeholder', 'output'
  710. TODO(pianpwk): make this a consistent node-level check once nn_module_stack is populated for cond submodules.
  711. """
  712. # Check top-level graph for all nodes, all graphs for placeholder & output nodes
  713. for i, mod in enumerate([graph_module] + list(graph_module.modules())):
  714. if not isinstance(mod, torch.fx.GraphModule):
  715. continue
  716. for node in mod.graph.nodes:
  717. if node.op in ["call_function", "get_attr"]:
  718. if i == 0:
  719. if (
  720. nn_module_stack := node.meta.get("nn_module_stack", None)
  721. ) is None:
  722. raise SpecViolationError(
  723. f"Node {node} of type {node.op} is missing nn_module_stack metadata"
  724. )
  725. if not all(
  726. isinstance(k, str)
  727. and isinstance(v, tuple)
  728. and len(v) == 2
  729. and all(isinstance(x, str) for x in v)
  730. for k, v in nn_module_stack.items()
  731. ):
  732. raise SpecViolationError(
  733. f"Node {node} of type {node.op} has incorrect nn_module_stack metadata format"
  734. f"expected Dict[str, Tuple[str, str]], but got {nn_module_stack}"
  735. )
  736. elif node.op in ["placeholder", "output"]:
  737. if node.meta.get("nn_module_stack", None):
  738. raise SpecViolationError(
  739. f"Node {node} of type {node.op} contains nn_module_stack metadata, this should be None"
  740. )
  741. def _verify_stack_trace(graph_module: torch.fx.GraphModule) -> None:
  742. """
  743. Perform stack trace checks on the graph.
  744. Constraints:
  745. - None or non-empty str for 'call_function', 'get_attr'
  746. - None for 'placeholder', 'output'
  747. """
  748. for i, mod in enumerate([graph_module] + list(graph_module.modules())):
  749. if not isinstance(mod, torch.fx.GraphModule):
  750. continue
751. for node in mod.graph.nodes:
  752. stack_trace = node.meta.get("stack_trace", None)
  753. if node.op in ["call_function", "get_attr"]:
  754. if not (stack_trace is None or isinstance(stack_trace, str)):
  755. raise SpecViolationError(
  756. f"Node {node} of type {node.op} has invalid stack_trace metadata, "
  757. f"expected a string or None but instead found: {stack_trace}"
  758. )
  759. elif node.op in ["placeholder", "output"]:
  760. if stack_trace:
  761. raise SpecViolationError(
  762. f"Node {node} of type {node.op} contains stack_trace metadata, "
  763. f"expected None but instead found: {stack_trace}"
  764. )
  765. def _verify_placeholder_names(gm: torch.fx.GraphModule, sig: ExportGraphSignature):
  766. """
  767. Performs a sanity check on the placeholder node names.
  768. - User input nodes: no restrictions, should match the original forward() signature
  769. - Params/buffers/constants/custom_obj/token nodes: should start with prefixes defined in <placeholder_prefixes>
  770. """
  771. name_to_kind = {spec.arg.name: spec.kind for spec in sig.input_specs}
  772. for mod in gm.modules():
  773. if not isinstance(mod, torch.fx.GraphModule):
  774. continue
  775. for node in mod.graph.nodes:
  776. if node.op == "placeholder":
  777. if node.name not in name_to_kind:
  778. continue
  779. node_kind = name_to_kind[node.name]
  780. prefix = placeholder_prefixes[node_kind]
  781. if not node.name.startswith(prefix):
  782. raise SpecViolationError(
  783. f"Placeholder node name {node.name} does not follow spec for {node_kind}, name should have prefix: {prefix}"
  784. )
  785. def get_ep_stats(ep: ExportedProgram) -> Dict[str, Any]:
  786. op_count = 0
  787. op_set = set()
  788. for m in ep.graph_module.modules():
  789. if not isinstance(m, torch.fx.GraphModule):
  790. continue
  791. for node in m.graph.nodes:
  792. if node.op != "call_function":
  793. continue
  794. op_count += 1
  795. assert hasattr(node.target, "__module__")
  796. assert hasattr(node.target, "__name__")
  797. op_set.add(f"{node.target.__module__}.{node.target.__name__}")
  798. return {"op_count": op_count, "op_set": op_set}
  799. _EXPORT_FLAGS: Optional[Set[str]] = None
  800. _EXPORT_MODULE_HIERARCHY: Optional[Dict[str, str]] = None
  801. def _log_export_wrapper(fn):
  802. @functools.wraps(fn)
  803. def wrapper(*args, **kwargs):
  804. global _EXPORT_FLAGS, _EXPORT_MODULE_HIERARCHY
  805. try:
  806. start = time.time()
  807. ep = fn(*args, **kwargs)
  808. end = time.time()
  809. log_export_usage(
  810. event="export.time",
  811. metrics=end - start,
  812. flags=_EXPORT_FLAGS,
  813. **get_ep_stats(ep),
  814. )
  815. except Exception as e:
  816. t = type(e)
  817. error_type = t.__module__ + "." + t.__qualname__
  818. log_export_usage(
  819. event="export.error",
  820. type=error_type,
  821. message=str(e),
  822. flags=_EXPORT_FLAGS,
  823. )
  824. raise e
  825. finally:
  826. _EXPORT_FLAGS = None
  827. _EXPORT_MODULE_HIERARCHY = None
  828. return ep
  829. return wrapper
  830. def _process_jit_trace_inputs_for_export(example_inputs, example_kwarg_inputs):
  831. if not isinstance(example_inputs, (tuple, list, dict)):
  832. example_inputs = (example_inputs,)
  833. elif isinstance(example_inputs, list):
  834. example_inputs = tuple(example_inputs)
  835. elif (
  836. isinstance(example_inputs, (torch.Tensor, dict))
  837. and example_kwarg_inputs is None
  838. ):
  839. example_inputs = (example_inputs,)
  840. if example_kwarg_inputs is None:
  841. example_kwarg_inputs = {}
  842. return example_inputs, example_kwarg_inputs
  843. @contextmanager
  844. def patch_forward(obj: torch.nn.Module, new_method):
  845. """Helper method to make it easier to cleanly torch.export() a method on a
  846. module that is not `forward`.
  847. """
  848. # Save the original method
  849. original_method = obj.forward
  850. # Patch the method
  851. obj.forward = new_method.__get__(obj, obj.__class__)
  852. try:
  853. yield
  854. finally:
  855. # Restore the original method
  856. obj.forward = original_method
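# Minimal usage sketch for `patch_forward` (the `_Demo` module and its
# `generate` method are hypothetical names used only for illustration):
def _example_patch_forward_usage():
    class _Demo(torch.nn.Module):
        def generate(self, x):
            return x * 2

    m = _Demo()
    # Temporarily treat `generate` as `forward` for the duration of the export.
    with patch_forward(m, _Demo.generate):
        return torch.export.export(m, (torch.randn(3),))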
  857. @contextmanager
  858. def _temp_disable_texpr_fuser():
  859. original_state = torch._C._jit_texpr_fuser_enabled()
  860. torch._C._jit_set_texpr_fuser_enabled(False)
  861. try:
  862. yield
  863. finally:
  864. torch._C._jit_set_texpr_fuser_enabled(original_state)
  865. class _WrapperModule(torch.nn.Module):
  866. def __init__(self, f):
  867. super().__init__()
  868. self.f = f
  869. def forward(self, *args, **kwargs):
  870. return self.f(*args, **kwargs)
  871. def _convert_ts_to_export_experimental(traced_callable, args, kwargs=None):
  872. with _temp_disable_texpr_fuser():
  873. from torch.jit._trace import TopLevelTracedModule
  874. export_args, export_kwargs = _process_jit_trace_inputs_for_export(args, kwargs)
  875. if isinstance(traced_callable, (TopLevelTracedModule, torch._C.ScriptModule)): # type: ignore[operator]
  876. return _export(
  877. traced_callable,
  878. export_args,
  879. export_kwargs,
  880. strict=False,
  881. _is_torch_jit_trace=True,
  882. ).module()
  883. elif isinstance(traced_callable, torch.ScriptMethod) and isinstance(
  884. traced_callable.owner(), (torch._C.ScriptModule, torch.nn.Module) # type: ignore[operator]
  885. ):
  886. with patch_forward(traced_callable.owner(), traced_callable): # type: ignore[operator]
  887. return _export(
  888. traced_callable.owner(), # type: ignore[operator]
  889. export_args,
  890. export_kwargs,
  891. strict=False,
  892. _is_torch_jit_trace=True,
  893. ).module()
  894. else:
  895. return _export(
  896. _WrapperModule(traced_callable),
  897. export_args,
  898. export_kwargs,
  899. strict=False,
  900. _is_torch_jit_trace=True,
  901. ).module()
  902. def _strict_export(
  903. mod: torch.nn.Module,
  904. args: Tuple[Any, ...],
  905. kwargs: Dict[str, Any],
  906. dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]],
  907. preserve_module_call_signature: Tuple[str, ...],
  908. pre_dispatch: bool,
  909. original_state_dict: Dict[str, Any],
  910. orig_in_spec: TreeSpec,
  911. _allow_complex_guards_as_runtime_asserts: bool,
  912. _disable_forced_specializations: Optional[bool],
  913. _is_torch_jit_trace: bool,
  914. ):
  915. gm_torch_level = _export_to_torch_ir(
  916. mod,
  917. args,
  918. kwargs,
  919. dynamic_shapes,
  920. preserve_module_call_signature=preserve_module_call_signature,
  921. restore_fqn=False, # don't need to restore because we will do it later
  922. _allow_complex_guards_as_runtime_asserts=_allow_complex_guards_as_runtime_asserts,
  923. _log_export_usage=False,
  924. )
925. # We detect the fake_mode by looking at gm_torch_level's placeholders; this is the fake_mode created in dynamo.
  926. (
  927. fake_args,
  928. fake_kwargs,
  929. fake_params_buffers,
  930. dynamo_fake_mode,
  931. ) = _convert_input_to_fake(gm_torch_level, args, kwargs)
932. # First, we make a pass over the graph to try to populate the
933. # "val" field for get_attr nodes wherever it is missing.
  934. # This can happen when quantization adds extra params and forgets
  935. # to update "val"
  936. for node in gm_torch_level.graph.nodes:
  937. if node.op == "get_attr" and "val" not in node.meta:
  938. attr = getattr(gm_torch_level, node.target)
  939. # Checks if it is not a HigherOrderOp branch or a module
  940. if not isinstance(attr, torch.nn.Module):
  941. assert (
  942. dynamo_fake_mode is not None
  943. ), "Cannot find dynamo_fake_mode. This could be due to the exported graph module have no placeholders."
  944. node.meta["val"] = dynamo_fake_mode.from_tensor(
  945. attr, static_shapes=True
  946. )
  947. # When aot_export lifts the params, we lose metadata (e.g. source_fn_stack, stack_trace)
  948. # from the param nodes as they are treated as fresh inputs
  949. # Therefore, we manually extract them before calling into aot_export
  950. params_buffers_to_node_meta = {}
  951. for node in gm_torch_level.graph.nodes:
  952. target = node.target
  953. meta = node.meta
  954. if node.op == "call_module":
  955. submodule = getattr(gm_torch_level, target)
  956. if isinstance(submodule, torch.nn.Module):
  957. for name, _ in submodule.named_parameters(
  958. recurse=True, remove_duplicate=False
  959. ):
  960. params_buffers_to_node_meta[target + "." + name] = meta
  961. for name, _ in submodule.named_buffers(
  962. recurse=True, remove_duplicate=False
  963. ):
  964. params_buffers_to_node_meta[target + "." + name] = meta
  965. if node.op == "get_attr":
  966. submodule = getattr(gm_torch_level, target)
  967. if not isinstance(submodule, torch.fx.GraphModule):
  968. params_buffers_to_node_meta[target] = meta
  969. # If the call_function uses param as input, we also need to update params' meta
  970. # with this call_function node's meta.
971. # This is basically the same flow as torch.fx.traceback.preserve_node_meta()
  972. if node.op == "call_function" and not isinstance(
  973. node.target, torch._ops.HigherOrderOperator
  974. ):
  975. for arg in node._input_nodes:
  976. if arg.op == "get_attr":
  977. for entry in torch.fx.proxy._COPY_META_FIELDS:
  978. if entry in meta:
  979. params_buffers_to_node_meta[arg.target][entry] = meta[entry]
  980. # Fix the graph output signature to be tuple if scalar
  981. out_spec = orig_out_spec = gm_torch_level._out_spec
  982. # Used to get rid of lint type error.
  983. assert out_spec is not None
984. # aot_export expects the return type to always be a tuple.
  985. if out_spec.type not in (list, tuple):
  986. out_spec = pytree.TreeSpec(tuple, None, [out_spec])
  987. orig_arg_names = gm_torch_level.graph._codegen.pytree_info.orig_args # type: ignore[attr-defined]
  988. gm_torch_level.graph._codegen = _PyTreeCodeGen(
  989. _PyTreeInfo(
  990. orig_arg_names,
  991. gm_torch_level._in_spec,
  992. out_spec,
  993. )
  994. )
  995. gm_torch_level.recompile()
  996. _normalize_nn_module_stack(gm_torch_level, type(mod))
  997. # NOTE: graph module expects only positional args
  998. constant_attrs = _gather_constant_attrs(mod)
  999. with dynamo_fake_mode:
  1000. aten_export_artifact = _export_to_aten_ir(
  1001. gm_torch_level,
  1002. _convert_to_positional_args(orig_arg_names, fake_args, fake_kwargs),
  1003. {},
  1004. fake_params_buffers,
  1005. constant_attrs,
  1006. pre_dispatch=pre_dispatch,
  1007. )
  1008. # Decompose for readability.
  1009. gm = aten_export_artifact.gm
  1010. export_graph_signature = aten_export_artifact.sig
  1011. constants = aten_export_artifact.constants
  1012. # Don't copy over nn_module_stack, stack_trace metadata for params/buffers nodes
  1013. for metadata in params_buffers_to_node_meta.values():
  1014. metadata.pop("nn_module_stack", None)
  1015. metadata.pop("stack_trace", None)
  1016. # After aot_export, set the param/buffer metadata back into placeholders
  1017. # Technically, users can still construct this data from param names
  1018. # without relying on this metadata
  1019. for node in gm.graph.nodes:
  1020. if node.op == "placeholder":
  1021. if node.target in export_graph_signature.inputs_to_parameters:
  1022. param_name = export_graph_signature.inputs_to_parameters[node.target]
  1023. if param_name in params_buffers_to_node_meta:
  1024. for k, v in params_buffers_to_node_meta[param_name].items():
  1025. node.meta[k] = v
  1026. if node.target in export_graph_signature.inputs_to_buffers:
  1027. buffer_name = export_graph_signature.inputs_to_buffers[node.target]
  1028. if buffer_name in params_buffers_to_node_meta:
  1029. for k, v in params_buffers_to_node_meta[buffer_name].items():
  1030. node.meta[k] = v
  1031. # Do some cleanups on the graph module to restore the state dict to the
  1032. # expected form. Each of these steps should probably get fixed upstream.
  1033. # 1. Remove tensor constants that were added as buffers.
  1034. _rewrite_dynamo_tensor_constants(
  1035. orig_mod_buffers=set(mod.buffers()),
  1036. traced_mod_buffers=dict(gm_torch_level.named_buffers()),
  1037. graph_signature=export_graph_signature,
  1038. constants=constants,
  1039. )
  1040. # 2. Restore FQN of param/buffers
  1041. param_buffer_table: Dict[str, str] = _get_param_buffer_mapping(mod, gm_torch_level)
  1042. _replace_param_buffer_names(param_buffer_table, export_graph_signature)
  1043. # 3. Remove non-persistent buffers from the graph signature
  1044. _rewrite_non_persistent_buffers(mod, export_graph_signature, constants)
  1045. # 4. Rewrite constants to have the same FQN as the original module.
  1046. _remap_constants(constant_attrs, export_graph_signature, constants)
  1047. # 5. Rename constants nodes in graph module from buffers to constants
  1048. _rename_constants_nodes(gm, export_graph_signature)
  1049. aten_export_artifact.out_spec = orig_out_spec
  1050. aten_export_artifact.fake_mode = dynamo_fake_mode
  1051. aten_export_artifact.module_call_specs = gm_torch_level.meta["module_call_specs"]
  1052. return aten_export_artifact
  1053. def _non_strict_export(
  1054. mod: torch.nn.Module,
  1055. args: Tuple[Any, ...],
  1056. kwargs: Dict[str, Any],
  1057. dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]],
  1058. preserve_module_call_signature: Tuple[str, ...],
  1059. pre_dispatch: bool,
  1060. original_state_dict: Dict[str, Any],
  1061. orig_in_spec: TreeSpec,
  1062. _allow_complex_guards_as_runtime_asserts: bool,
  1063. _disable_forced_specializations: Optional[bool],
  1064. _is_torch_jit_trace: bool,
  1065. ):
  1066. out_spec = None
  1067. module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {}
  1068. def _tuplify_outputs(aot_export):
  1069. def _aot_export_non_strict(mod, args, kwargs=None, **flags):
  1070. kwargs = kwargs or {}
  1071. class Wrapper(torch.nn.Module):
  1072. def __init__(self, mod):
  1073. super().__init__()
  1074. self._export_root = mod
  1075. def forward(self, *args, **kwargs):
  1076. nonlocal out_spec
  1077. if isinstance(self._export_root, torch.fx.GraphModule):
  1078. with torch.fx.traceback.preserve_node_meta():
  1079. tree_out = torch.fx.Interpreter(self._export_root).run(
  1080. *args, **kwargs
  1081. )
  1082. else:
  1083. tree_out = self._export_root(*args, **kwargs)
  1084. flat_outs, out_spec = pytree.tree_flatten(tree_out)
  1085. return tuple(flat_outs)
  1086. wrapped_mod = Wrapper(mod)
1087. # Prefix the preserved call signatures with `_export_root` so that the wrapper
1088. # module correctly populates the in/out specs
  1089. new_preserved_call_signatures = [
  1090. "_export_root." + i for i in preserve_module_call_signature
  1091. ]
  1092. with _wrap_submodules(
  1093. wrapped_mod, new_preserved_call_signatures, module_call_specs
  1094. ):
  1095. gm, sig = aot_export(wrapped_mod, args, kwargs=kwargs, **flags)
  1096. log.debug("Exported program from AOTAutograd:\n%s", gm)
  1097. sig.parameters = pytree.tree_map(_strip_root, sig.parameters)
  1098. sig.buffers = pytree.tree_map(_strip_root, sig.buffers)
  1099. sig.inputs_to_buffers = pytree.tree_map(_strip_root, sig.inputs_to_buffers)
  1100. sig.inputs_to_parameters = pytree.tree_map(
  1101. _strip_root, sig.inputs_to_parameters
  1102. )
  1103. sig.buffers_to_mutate = pytree.tree_map(_strip_root, sig.buffers_to_mutate)
  1104. for node in gm.graph.nodes:
  1105. if "nn_module_stack" in node.meta:
  1106. nn_module_stack = node.meta["nn_module_stack"]
  1107. node.meta["nn_module_stack"] = {
  1108. _fixup_key(key): val
  1109. for key, val in pytree.tree_map(
  1110. _strip_root, nn_module_stack
  1111. ).items()
  1112. }
  1113. return gm, sig
  1114. return _aot_export_non_strict
  1115. (
  1116. fake_mode,
  1117. fake_args,
  1118. fake_kwargs,
  1119. equalities_inputs,
  1120. original_signature,
  1121. ) = make_fake_inputs(
  1122. mod,
  1123. args,
  1124. kwargs,
  1125. dynamic_shapes,
  1126. _is_torch_jit_trace=_is_torch_jit_trace,
  1127. _allow_complex_guards_as_runtime_asserts=_allow_complex_guards_as_runtime_asserts, # for shape env initialization
  1128. )
  1129. fake_params_buffers = make_fake_params_buffers(fake_mode, _get_params_buffers(mod))
  1130. with fake_mode:
  1131. with _fakify_script_objects(mod, fake_args, fake_kwargs, fake_mode) as (
  1132. patched_mod,
  1133. new_fake_args,
  1134. new_fake_kwargs,
  1135. new_fake_constant_attrs,
  1136. map_fake_to_real,
  1137. ):
  1138. aten_export_artifact = _export_to_aten_ir(
  1139. patched_mod,
  1140. new_fake_args,
  1141. new_fake_kwargs,
  1142. fake_params_buffers,
  1143. new_fake_constant_attrs,
  1144. pre_dispatch=pre_dispatch,
  1145. transform=_tuplify_outputs,
  1146. _is_torch_jit_trace=_is_torch_jit_trace,
  1147. )
1148. # aten_export_artifact.constants contains only fake script objects; map them back to the real ones
  1149. aten_export_artifact.constants = {
  1150. fqn: map_fake_to_real[obj] if isinstance(obj, FakeScriptObject) else obj
  1151. for fqn, obj in aten_export_artifact.constants.items()
  1152. }
  1153. try:
  1154. produce_guards_and_solve_constraints(
  1155. fake_mode,
  1156. aten_export_artifact.gm,
  1157. dynamic_shapes,
  1158. equalities_inputs,
  1159. original_signature,
  1160. _disable_forced_specializations=_disable_forced_specializations,
  1161. _is_torch_jit_trace=_is_torch_jit_trace,
  1162. )
  1163. except (ConstraintViolationError, ValueRangeError) as e:
  1164. raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: B904
  1165. _rewrite_non_persistent_buffers(
  1166. mod, aten_export_artifact.sig, aten_export_artifact.constants
  1167. )
  1168. aten_export_artifact.out_spec = out_spec
  1169. aten_export_artifact.fake_mode = fake_mode
  1170. aten_export_artifact.module_call_specs = module_call_specs
  1171. return aten_export_artifact
  1172. @_log_export_wrapper
  1173. @_disable_prexisiting_fake_mode
  1174. def _export(
  1175. mod: torch.nn.Module,
  1176. args: Tuple[Any, ...],
  1177. kwargs: Optional[Dict[str, Any]] = None,
  1178. dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
  1179. *,
  1180. strict: bool = True,
  1181. preserve_module_call_signature: Tuple[str, ...] = (),
  1182. pre_dispatch: bool = False,
  1183. _allow_complex_guards_as_runtime_asserts: bool = False,
  1184. _disable_forced_specializations: Optional[bool] = False,
  1185. _is_torch_jit_trace: bool = False,
  1186. ) -> ExportedProgram:
  1187. """
  1188. Traces either an nn.Module's forward function or just a callable with PyTorch
1189. operations inside, and produces an ExportedProgram.
  1190. Args:
1191. mod: the `nn.Module` to trace.
  1192. args: example positional inputs.
  1193. kwargs: optional example keyword inputs.
  1194. dynamic_shapes:
  1195. An optional argument where the type should either be:
  1196. 1) a dict from argument names of ``f`` to their dynamic shape specifications,
  1197. 2) a tuple that specifies dynamic shape specifications for each input in original order.
  1198. If you are specifying dynamism on keyword args, you will need to pass them in the order that
  1199. is defined in the original function signature.
  1200. The dynamic shape of a tensor argument can be specified as either
  1201. (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
  1202. not required to include static dimension indices in this dict, but when they are,
  1203. they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
  1204. where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
  1205. are denoted by None. Arguments that are dicts or tuples / lists of tensors are
  1206. recursively specified by using mappings or sequences of contained specifications.
  1207. preserve_module_call_signature: A list of submodule paths for which the original
  1208. calling conventions are preserved as metadata.
  1209. _allow_complex_guards_as_runtime_asserts:
  1210. With the current dynamic shapes language for dims and derived dims, we can run into constraints
  1211. that are not expressible with the language. For example, flattening a matrix and adding to a vector,
  1212. both fully dynamic (i.e. x.reshape([-1]) + y) emits a guard s0 * s1 = s2, which is not expressible.
  1213. By default, we either raise a constraint violation error or specialize to static values.
  1214. If this flag is set to True, we avoid erroring out and instead allow complex constraints to exist as runtime
  1215. assertions in the graph. The sympy interpreter (torch/utils/_sympy/interp.py) will produce the math ops
  1216. required to compute and assert the value of the guard (e.g. sym_size_int, eq, _assert_scalar).
  1217. Additionally, if TORCH_DYNAMO_DO_NOT_EMIT_RUNTIME_ASSERTS=1 is specified, we will allow complex constraints
1218. while not emitting runtime asserts, returning a cleaner graph with fewer guarantees around dynamic shapes.
  1219. _disable_forced_specializations:
  1220. Similar to _allow_complex_guards_as_runtime_asserts, but only avoids specializing to static values if set to True.
  1221. For complex guards that don't specialize, this flag doesn't have any effect. Ideally this would be subsumed by
  1222. _allow_complex_guards_as_runtime_asserts, but this handles one additional case: single-variable equalities where
  1223. the symbol is solvable for a concrete value (e.g. Eq(s0 // 4, 400) -> s0 = 1600). If set to True, this flag will
  1224. avoid specializations. Direct equalities (e.g. s0 = 4), will still specialize.
  1225. Returns:
  1226. An ExportedProgram containing the traced method.
  1227. """
  1228. if not isinstance(args, tuple):
  1229. raise UserError(
  1230. UserErrorType.INVALID_INPUT,
  1231. f"Expecting `args` to be a tuple of example positional inputs, got {type(args)}",
  1232. )
  1233. if _disable_forced_specializations and strict:
  1234. raise UserError(
  1235. UserErrorType.INVALID_INPUT,
  1236. "_disable_forced_specializations can be only be specified in non-strict mode.",
  1237. )
  1238. global _EXPORT_FLAGS, _EXPORT_MODULE_HIERARCHY
  1239. _EXPORT_MODULE_HIERARCHY = _get_module_hierarchy(mod)
  1240. flags = set()
  1241. flags.add("strict" if strict else "non_strict")
  1242. flags.add("pre_dispatch" if pre_dispatch else "aot_dispatch")
  1243. log_export_usage(event="export.enter", flags=flags)
  1244. _EXPORT_FLAGS = flags
  1245. kwargs = kwargs or {}
  1246. if isinstance(dynamic_shapes, torch.export.ShapesCollection):
  1247. dynamic_shapes = dynamic_shapes.dynamic_shapes(mod, args, kwargs)
  1248. flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
  1249. original_state_dict = mod.state_dict(keep_vars=True)
  1250. if not _is_torch_jit_trace:
  1251. forward_arg_names = _get_forward_arg_names(mod, args, kwargs)
  1252. else:
  1253. forward_arg_names = None
  1254. # Call the appropriate export function based on the strictness of tracing.
  1255. export_func = _strict_export if strict else _non_strict_export
  1256. aten_export_artifact = export_func(
  1257. mod,
  1258. args,
  1259. kwargs,
  1260. dynamic_shapes,
  1261. preserve_module_call_signature,
  1262. pre_dispatch,
  1263. original_state_dict,
  1264. orig_in_spec,
  1265. _allow_complex_guards_as_runtime_asserts,
  1266. _disable_forced_specializations,
  1267. _is_torch_jit_trace,
  1268. )
  1269. # Decompose here for readability.
  1270. gm = aten_export_artifact.gm
  1271. export_graph_signature = aten_export_artifact.sig
  1272. out_spec = aten_export_artifact.out_spec
  1273. constants = aten_export_artifact.constants
  1274. fake_mode = aten_export_artifact.fake_mode
  1275. module_call_specs = aten_export_artifact.module_call_specs
  1276. # Add forward args metadata.
  1277. gm.meta["forward_arg_names"] = forward_arg_names
  1278. # The unbacked symint symbols are updated in aot_export
  1279. # so we serialize them here instead of inside dynamo.
  1280. gm.meta["inline_constraints"] = {
  1281. k: v
  1282. for k, v in fake_mode.shape_env.var_to_range.items()
  1283. if free_unbacked_symbols(k)
  1284. }
  1285. num_lifted = next(
  1286. (
  1287. i
  1288. for i, s in enumerate(export_graph_signature.input_specs)
  1289. if s.kind == InputKind.USER_INPUT
  1290. ),
  1291. len(export_graph_signature.input_specs),
  1292. )
  1293. combined_args = _combine_args(
  1294. mod, args, kwargs, _is_torch_jit_trace=_is_torch_jit_trace
  1295. )
  1296. range_constraints = make_constraints(
  1297. fake_mode,
  1298. gm,
  1299. combined_args,
  1300. dynamic_shapes,
  1301. num_lifted,
  1302. )
  1303. if strict:
  1304. _add_runtime_assertions_to_cond_in_subgraph(
  1305. range_constraints,
  1306. gm,
  1307. fake_mode,
  1308. )
  1309. # Make module signatures.
  1310. module_call_signatures = {}
  1311. for fqn, specs in module_call_specs.items():
  1312. mod_fqn = _strip_root(fqn) if not strict else fqn
  1313. module_call_signatures[mod_fqn] = ModuleCallSignature(
  1314. inputs=[], outputs=[], **specs
  1315. )
  1316. if len(preserve_module_call_signature) > 0:
  1317. if not strict:
  1318. _rewrite_node(gm)
  1319. res = CollectTracepointsPass(module_call_signatures, export_graph_signature)(gm)
  1320. assert res is not None
  1321. gm = res.graph_module
  1322. assert out_spec is not None
  1323. _verify_nn_module_stack(gm)
  1324. _verify_stack_trace(gm)
  1325. if not _is_torch_jit_trace:
  1326. _verify_placeholder_names(gm, export_graph_signature)
  1327. exported_program = ExportedProgram(
  1328. root=gm,
  1329. graph=gm.graph,
  1330. graph_signature=export_graph_signature,
  1331. state_dict=original_state_dict,
  1332. range_constraints=range_constraints,
  1333. module_call_graph=_make_module_call_graph(
  1334. _EXPORT_MODULE_HIERARCHY,
  1335. orig_in_spec,
  1336. out_spec,
  1337. module_call_signatures,
  1338. ),
  1339. example_inputs=(args, kwargs),
  1340. constants=aten_export_artifact.constants,
  1341. )
  1342. return exported_program
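# Minimal end-to-end usage sketch of the export entry point documented above
# (the `_Demo` module, shapes, and dim name are hypothetical; shown through the
# public torch.export.export API, which routes into _export):
def _example_export_usage():
    class _Demo(torch.nn.Module):
        def forward(self, x):
            return x.sum(dim=-1)

    batch = torch.export.Dim("batch")
    # Dim 0 of `x` is exported as dynamic; dim 1 stays static (None by default).
    return torch.export.export(
        _Demo(),
        (torch.randn(4, 8),),
        dynamic_shapes={"x": {0: batch}},
    )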