comptime.py 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395
  1. # mypy: allow-untyped-defs
  2. # This file establishes the public comptime interface to Dynamo.
  3. # This allows Dynamo users to execute arbitrary Python code while
  4. # Dynamo is symbolically evaluating their original programs.
  5. #
  6. # The goal of the public API is to give users rope, without actually
  7. # leaking private implementation details of Dynamo.
  8. import builtins
  9. import dis
  10. import traceback
  11. from typing import Optional, Union
  12. import torch
  13. from torch.fx.experimental.symbolic_shapes import free_symbols
  14. from .exc import unimplemented
  15. from .variables import NewCellVariable
  16. from .variables.constant import ConstantVariable
  17. from .variables.misc import ClosureVariable
  18. from .variables.tensor import SymNodeVariable
  19. class ComptimeVar:
  20. """
  21. A ComptimeVar represents a Python value, at some particular point
  22. in time, in the Python code we are symbolically evaluating with
  23. torchdynamo. This must be distinguished from a runtime value, as
  24. at compile-time there are some properties of the variable we
  25. do not know (for example, if the ComptimeVar represents a Tensor,
  26. we only know metadata about the tensor; we do NOT know what the
  27. actual data in the Tensor is.)
  28. """
  29. def __init__(self, v):
  30. self.__variable = v
  31. def as_proxy(self):
  32. """
  33. Returns an fx.Proxy (or tuple/list of fx.Proxy) representing
  34. this variable in the FX graph we are assembling to pass
  35. to the user compiler.
  36. This method only works for variables we actually track in
  37. the FX graph, aka Tensors (and ints, if you are compiling
  38. with dynamic shapes). In particular, if you have a list
  39. or tuple of tensors, you will get a list/tuple of proxies
  40. (not a single proxy representing the entire list/tuple).
  41. """
  42. return self.__variable.as_proxy()
  43. def is_proxy(self):
  44. """
  45. Returns True if as_proxy() would succeed.
  46. """
  47. return self.__variable.is_proxy()
  48. def as_fake(self):
  49. """
  50. Returns a "fake" value (either a FakeTensor or a SymInt)
  51. representing the variable in question. This only works
  52. for variables that denote Tensor or int. You can use
  53. this to query metadata; e.g., v.as_fake().size(0) will
  54. tell you the compile-time known size of the tensor.
  55. WARNING: Do NOT mutate the returned tensor.
  56. """
  57. return self.__variable.as_proxy().node.meta["example_value"]
  58. def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]:
  59. """
  60. Returns the size of the tensor (if dim is None) or the size
  61. at the dimension dim. The returned size may be a SymInt.
  62. """
  63. return self.as_fake().size(dim)
  64. def python_type(self):
  65. """
  66. Returns what type(v) would have returned for the variable
  67. at compile time.
  68. """
  69. return self.__variable.python_type()
  70. def as_python_constant(self):
  71. """
  72. Returns the Python value this variable would have, but only if it is
  73. completely known at compile-time (e.g., it is constant).
  74. WARNING: Do NOT mutate the returned constant. The returned constant
  75. may or may not correspond to the actual value this variable may take
  76. on at runtime; for example, if the variable in question is a constant
  77. list, we may return a copy of that list.
  78. """
  79. return self.__variable.as_python_constant()
  80. def is_python_constant(self):
  81. """
  82. Returns True if as_python_constant would succeed.
  83. """
  84. return self.__variable.is_python_constant()
  85. def is_dynamic(self):
  86. if isinstance(self.__variable, SymNodeVariable):
  87. fs = free_symbols(self.__variable.sym_num)
  88. return bool(fs)
  89. return False
  90. def force_static(self):
  91. """
  92. Forces that a value is static, inducing a guard on its specific value
  93. """
  94. if isinstance(self.__variable, SymNodeVariable):
  95. self.__variable.evaluate_expr()
  96. elif isinstance(self.__variable, ConstantVariable):
  97. # TODO: Maybe complain if this isn't a int/bool/float variable
  98. pass
  99. else:
  100. raise AssertionError(
  101. f"cannot force {self.__variable} ({type(self.__variable)}) static"
  102. )
  103. def _i_will_not_complain_if_bc_breaks_VariableTracker(self):
  104. """
  105. Returns the internal data structure VariableTracker that Dynamo uses
  106. to represent variables at compile time. There are no BC guarantees on
  107. this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on
  108. it.
  109. """
  110. return self.__variable
  111. def __repr__(self):
  112. return self.__variable.debug_repr()
  113. # TODO: API for adding a custom guard
  114. class ComptimeContext:
  115. """
  116. This context class provides access to a public API for Dynamo's internals.
  117. If there is something here you would find useful that is missing, please
  118. file a feature request at https://github.com/pytorch/pytorch/
  119. """
  120. def __init__(self, tx):
  121. self.__tx = tx
  122. def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar:
  123. """
  124. Retrieve the compile-time known information about a local.
  125. """
  126. tx = self.__get_tx(stacklevel)
  127. # This is analogous to LOAD_DEREF
  128. if hasattr(tx, "closure_cells") and name in tx.closure_cells:
  129. cell = tx.closure_cells[name]
  130. if isinstance(cell, ClosureVariable):
  131. return ComptimeVar(tx.output.root_tx.symbolic_locals[cell.name])
  132. else:
  133. return ComptimeVar(tx.output.side_effects.load_cell(cell))
  134. else:
  135. r = tx.symbolic_locals[name]
  136. if isinstance(r, NewCellVariable):
  137. return ComptimeVar(tx.output.side_effects.load_cell(r))
  138. else:
  139. return ComptimeVar(r)
  140. def graph_break(self, msg="ComptimeContext.graph_break"):
  141. """
  142. Manually trigger a graph break
  143. """
  144. unimplemented(msg)
  145. def graph(self):
  146. """
  147. Retrieve the partially constructed FX graph that would be
  148. passed to the user compiler after compilation.
  149. """
  150. return self.__tx.output.graph
  151. def assert_static(self, val):
  152. """
  153. Asserts that the int is static (and not dynamic, per dynamic shapes)
  154. """
  155. assert (
  156. not val.is_dynamic()
  157. ), "expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)"
  158. def print_graph(self, *, verbose=True, file=None):
  159. """
  160. Print the partially constructed FX graph that would be passed
  161. to the user compiler after compilation.
  162. """
  163. print(
  164. self.__tx.output.graph.python_code("self", verbose=verbose).src, file=file
  165. )
  166. def parent(self):
  167. return ComptimeContext(self.__tx.parent)
  168. def __get_tx(self, stacklevel):
  169. tx = self.__tx
  170. for _ in range(stacklevel):
  171. tx = tx.parent
  172. return tx
  173. def print(self, val, *, file=None):
  174. print(repr(val), file=file)
  175. def print_disas(self, *, file=None, stacklevel=0):
  176. """
  177. Print the current series of opcodes being executed (not including
  178. parent frames), including where you are in the particular opcode
  179. stream.
  180. """
  181. tx = self.__get_tx(stacklevel)
  182. print(
  183. dis.Bytecode(
  184. tx.f_code,
  185. current_offset=tx.instructions[tx.instruction_pointer].offset,
  186. ).dis(),
  187. file=file,
  188. )
  189. def print_value_stack(self, *, file=None, stacklevel=0):
  190. """
  191. Print the current Python value stack. Note that this is NOT the same
  192. as the traceback; use print_bt() to print that. Note that at
  193. stacklevel=0, this will typically be empty, as comptime cannot
  194. currently be used in an expression context where there would be
  195. intermediates on the stack. If you would find this useful, please
  196. file a bug at https://github.com/pytorch/pytorch/
  197. NB: Stack grows downwards in our print
  198. """
  199. # TODO: improve printing
  200. tx = self.__get_tx(stacklevel)
  201. for s in tx.stack:
  202. print(f"- {s}", file=file)
  203. def print_locals(self, *, file=None, stacklevel=0):
  204. """
  205. Print all of the locals available in the current context.
  206. By default this view is very limited; you can get more information
  207. about any individual local using get_local().
  208. """
  209. # TODO: improve by improving the VariableTracker printing
  210. tx = self.__get_tx(stacklevel)
  211. for k, v in tx.symbolic_locals.items():
  212. print(f"{k} = {v}", file=file)
  213. def print_bt(self, *, file=None, stacklevel=0):
  214. """
  215. Print the user code backtrace, starting at the beginning of the
  216. frame Dynamo started evaluating. Note that this MAY NOT go all
  217. the way to the torch.compile invocation, as we may have done
  218. a graph break and are compiling an intermediate frame as the
  219. starting point. If you think the other behavior would be better,
  220. file a bug at https://github.com/pytorch/pytorch/
  221. """
  222. stack = []
  223. tx = self.__get_tx(stacklevel)
  224. while tx is not None:
  225. stack.append(tx.frame_summary())
  226. tx = getattr(tx, "parent", None)
  227. print(
  228. "".join(traceback.StackSummary.from_list(reversed(stack)).format()),
  229. file=file,
  230. )
  231. def print_guards(self, *, file=None):
  232. """
  233. Print the currently installed guards for the Dynamo context.
  234. This does NOT include guards associated with variables that
  235. may or may not be installed in the future if those variables
  236. are used.
  237. """
  238. # TODO: improve print format, current guard format is extremely
  239. # verbose
  240. print(
  241. "\n".join(f"{repr(guard)}" for guard in sorted(self.__tx.output.guards)),
  242. file=file,
  243. )
  244. def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self):
  245. """
  246. Returns the internal data structure InstructionTranslator that Dynamo
  247. uses to track state of symbolic evaluation. There are no BC
  248. guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if
  249. you rely on it.
  250. """
  251. return self.__tx
class _Comptime:
    """
    Callable namespace behind the module-level ``comptime`` object:
    ``comptime(fn)`` plus a set of static convenience wrappers that
    each delegate to a ComptimeContext method.

    NOTE(review): the lambdas below fetch their arguments back via
    ``ctx.get_local("...")`` — the string must match the wrapper's own
    parameter name, since at compile time the wrapper's frame is being
    symbolically evaluated and the argument only exists as a symbolic
    local there.
    """

    @staticmethod
    def __call__(fn, fallback_fn=lambda: None):
        """fn gets called at compile time in TorchDynamo, calls fallback_fn otherwise"""
        # Plain (non-compiled) execution path: only the fallback runs here.
        # Dynamo is expected to intercept calls to `comptime` during tracing
        # and invoke `fn` with a ComptimeContext instead — TODO confirm
        # against the Dynamo call-handling code.
        fallback_fn()

    # Convenience wrappers that are more compact to use

    @staticmethod
    def graph_break():
        comptime(lambda ctx: ctx.graph_break())

    @staticmethod
    def print(e):
        # At compile time print the symbolic value of `e`; at runtime the
        # fallback prints the actual value.
        comptime(lambda ctx: ctx.print(ctx.get_local("e")), lambda: print(e))

    @staticmethod
    def print_graph():
        comptime(lambda ctx: ctx.print_graph())

    @staticmethod
    def print_disas(*, stacklevel=0):
        # The +1 compensates for the extra frame introduced by this wrapper,
        # so the user-supplied stacklevel stays relative to their own frame.
        comptime(
            lambda ctx: ctx.print_disas(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    @staticmethod
    def print_value_stack(*, stacklevel=0):
        # +1: skip this wrapper's frame (see print_disas).
        comptime(
            lambda ctx: ctx.print_value_stack(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    # This is a more useful variant of print_value_stack that can be used
    # in an expression context; e.g., x + print_value_stack_and_return(y + z),
    # you will see x on the stack prior to the addition operation
    @staticmethod
    def print_value_stack_and_return(e, *, stacklevel=0):
        # +1: skip this wrapper's frame (see print_disas).
        comptime(
            lambda ctx: ctx.print_value_stack(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )
        return e

    @staticmethod
    def print_locals(*, stacklevel=0):
        # +1: skip this wrapper's frame (see print_disas).
        comptime(
            lambda ctx: ctx.print_locals(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    @staticmethod
    def print_bt(*, stacklevel=0):
        # +1: skip this wrapper's frame (see print_disas).
        comptime(
            lambda ctx: ctx.print_bt(
                stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1
            )
        )

    @staticmethod
    def print_guards():
        comptime(lambda ctx: ctx.print_guards())

    @staticmethod
    def assert_static(val):
        comptime(lambda ctx: ctx.assert_static(ctx.get_local("val")))

    @staticmethod
    def force_static(val):
        comptime(lambda ctx: ctx.get_local("val").force_static())

    @staticmethod
    def breakpoint():
        """
        Like pdb breakpoint(), but drop into pdb whenever this line
        of code is compiled by dynamo.  Use it by putting
        this in your model code::

            from torch._dynamo.comptime import comptime

            comptime.breakpoint()

        And then, inside pdb, you can access 'ctx' to query things
        about the compilation context::

            (Pdb) !ctx.print_bt()
            (Pdb) !ctx.print_locals()
            (Pdb) p ctx.get_local("attention").as_fake()
        """

        def inner(inner_ctx):
            # parent() steps out of `inner` itself, so `ctx` refers to the
            # frame that called comptime.breakpoint() — presumably what the
            # user wants to inspect from the debugger.
            ctx = inner_ctx.parent()
            builtins.breakpoint()

        comptime(inner)
  333. comptime = _Comptime()