executor.py

# mypy: allow-untyped-defs
from typing import Callable, Optional

from torch._prims.context import TorchRefsMode
from torch.fx import GraphModule
from torch.fx.experimental.proxy_tensor import make_fx, wrapper_and_args_for_make_fx


def execute(
    gm: GraphModule,
    *args,
    executor: str = "aten",
    executor_parameters: Optional[dict] = None,
):
    """
    Prototype ATen executor.

    Just executes the context's graph.
    """

    if executor == "aten":
        return gm.forward(*args)

    msg = f"Received unexpected value for 'executor': {executor}. Allowed values are: aten."
    raise ValueError(msg)


def make_traced(fn: Callable):
    """
    Returns a function that, when called, will
    trace its torch operations to prims and then
    execute those prims on the requested trace executor
    (possibly lowering them to that trace executor first).

    Only supports the torch operations defined in _torch_to_reference_map
    in context.py and operations with positional args. All args must
    be tensors.
    In the near future all these restrictions will be lifted.

    Example usage:

    def foo(a, b):
        return torch.add(a, b)

    traced_foo = make_traced(foo)

    a = torch.randn((1, 2, 3, 4, 5), device='cuda')
    b = torch.randn((1, 2, 3, 4, 5), device='cuda')
    result = traced_foo(a, b, executor='aten')
    """
    def _traced(*args, executor="aten", **kwargs):
        # TODO: caching
        # Flatten args and kwargs into a single list and build a wrapper
        # that accepts that list, since make_fx traces positional inputs.
        wrapped, all_args = wrapper_and_args_for_make_fx(fn, args, kwargs)

        # Trace while TorchRefsMode rewrites torch.* calls to their
        # prim/reference decompositions.
        with TorchRefsMode():
            gm = make_fx(wrapped)(all_args)
        return execute(gm, all_args, executor=executor)

    return _traced
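

# ---------------------------------------------------------------------------
# Usage sketch (not part of the file above): a minimal, runnable example of
# make_traced, adapted from the docstring's example but using CPU tensors so
# no GPU is required. The import path assumes this file lives at
# torch/_prims/executor.py, as in PyTorch.

import torch

from torch._prims.executor import execute, make_traced


def foo(a, b):
    return torch.add(a, b)


traced_foo = make_traced(foo)

a = torch.randn((1, 2, 3, 4, 5))
b = torch.randn((1, 2, 3, 4, 5))
result = traced_foo(a, b, executor="aten")
assert torch.allclose(result, torch.add(a, b))

# execute can also be called directly on a GraphModule produced by make_fx;
# with executor="aten" it simply runs the graph's forward with the given args.
from torch.fx.experimental.proxy_tensor import make_fx  # noqa: E402

gm = make_fx(foo)(a, b)
out = execute(gm, a, b, executor="aten")
assert torch.allclose(out, result)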