# mypy: allow-untyped-defs
import functools
from contextlib import nullcontext
from typing import Any, Callable, Dict, Optional, Sequence

import torch
import torch._decomp
import torch._prims
import torch._refs
import torch._refs.nn
import torch._refs.nn.functional
import torch._refs.special
import torch.overrides
from torch._prims_common import torch_function_passthrough


@functools.lru_cache(None)
def torch_to_refs_map():
    """
    Mapping of torch API functions to torch._refs functions.
    E.g. torch_to_refs_map()[torch.add] == torch._refs.add
    """
    modules = [
        (torch, torch._refs),
        (torch.nn, torch._refs.nn),
        (torch.nn.functional, torch._refs.nn.functional),
        (torch.special, torch._refs.special),
        (torch.fft, torch._refs.fft),
        (torch.linalg, torch._refs.linalg),
    ]
    r: Dict[Any, Any] = {
        torch.Tensor.__invert__: torch._refs.bitwise_not,
        torch.Tensor.__xor__: torch._refs.bitwise_xor,
        torch.Tensor.__and__: torch._refs.bitwise_and,
        torch.Tensor.__or__: torch._refs.bitwise_or,
        torch.Tensor.__eq__: torch._refs.eq,
        torch.Tensor.__rsub__: torch._refs.rsub,
        torch.Tensor.__rtruediv__: torch._refs.rtruediv,
        torch.Tensor.__floordiv__: torch._refs.floor_divide,
        torch.Tensor.__rfloordiv__: torch._refs.rfloordiv,
        torch.Tensor.__pow__: torch._refs.pow,
        torch.Tensor.__rpow__: torch._refs.rpow,
        torch.Tensor.new_empty: torch._refs.new_empty,
        torch.Tensor.new_full: torch._refs.new_full,
        torch.Tensor.new_zeros: torch._refs.new_zeros,
        torch.Tensor.new_ones: torch._refs.new_ones,
        torch.Tensor.fill_: torch._refs.fill_,
        torch.Tensor.zero_: torch._refs.zero_,
        torch.Tensor.to: torch._refs.to,
        torch.Tensor.sum_to_size: torch._refs.sum_to_size,
        # TODO: Should these methods be mapped some other way?
        torch.Tensor.copy_: torch._prims.copy_to,
        torch.Tensor.resize: torch._prims.resize,
    }
    for mod_torch, mod_refs in modules:
        for s in mod_refs.__all__:  # type: ignore[attr-defined]
            r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s)

    # Support remapping torch.Tensor.foo to _refs.foo
    for s in dir(torch.Tensor):
        if s in torch._refs.__all__:
            r[getattr(torch.Tensor, s)] = torch._refs.__dict__.get(s)

    # Support conversions
    for s in torch._refs._conversions.__all__:
        tensor_attr = getattr(torch.Tensor, s, None) or getattr(torch, s)
        r[tensor_attr] = torch._refs._conversions.__dict__.get(s)

    return r


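# Illustrative lookups (comments only, so nothing runs at import time): per the
# docstring above, torch_to_refs_map()[torch.add] == torch._refs.add, and the
# dir(torch.Tensor) remapping loop means Tensor methods resolve as well, e.g.
# torch_to_refs_map()[torch.Tensor.add] == torch._refs.add.

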
@functools.lru_cache(None)
def all_prims():
    """
    Set of all prim functions, e.g., torch._prims.add in all_prims()
    """
    return {torch._prims.__dict__.get(s) for s in torch._prims.__all__}


class TorchRefsMode(torch.overrides.TorchFunctionMode):
    """
    Switches the interpretation of torch.* functions and Tensor methods to
    use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.)

    >>> # xdoctest: +SKIP
    >>> with TorchRefsMode():
    ...     torch.add(x, y)  # calls torch._refs.add(x, y)

    By default, this context manager falls back on the torch.* implementation
    if the ref does not exist; set strict=True to raise an error instead.
    Even when a ref exists, it is sometimes desirable to fall back on torch.*;
    this behavior can be customized by passing a function to should_fallback_fn.
    """

    def __init__(
        self,
        strict=False,
        should_fallback_fn=lambda *_: False,
        prims_mode_cls=nullcontext,
    ):
        self.strict = strict
        self.should_fallback_fn = should_fallback_fn
        self.prims_mode_cls = prims_mode_cls

    def __torch_function__(
        self,
        orig_func: Callable,
        types: Sequence,
        args: Sequence[Any] = (),
        kwargs: Optional[Dict] = None,
    ):
        if kwargs is None:
            kwargs = {}
        # For primitive operations, run them as is without interception
        # Unless we are in prims_mode, in which case we want to use nvprims
        if orig_func in torch_function_passthrough or orig_func in all_prims():
            with self.prims_mode_cls():
                return orig_func(*args, **kwargs)
        mapping = torch_to_refs_map()
        func = mapping.get(orig_func, None)
        # For torch.ops.aten.*, use registered decompositions from torch._decomp.
        # torch._decomp.decomposition_table provides a mapping from
        # torch.ops.aten.* to torch._refs or torch._decomp.decompositions
        # implementations.
        # There are other ways to implement this functionality;
        # see https://github.com/pytorch/pytorch/pull/82657#discussion_r939776417
        if func is None and isinstance(orig_func, torch._ops.OpOverload):
            func = torch._decomp.decomposition_table.get(orig_func, None)
        if func is not None:
            # If the ref exists query whether we should use it or not
            if self.should_fallback_fn(self, orig_func, func, args, kwargs):
                return orig_func(*args, **kwargs)
            # torch calls inside func should be interpreted as refs calls
            with self:
                return func(*args, **kwargs)
        if self.strict:
            raise RuntimeError(
                f"no _refs support for {torch.overrides.resolve_name(orig_func)}"
            )
        return orig_func(*args, **kwargs)
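

# A minimal usage sketch (illustrative only; the tensors and the fallback
# predicate below are arbitrary examples, not part of this module's API).
if __name__ == "__main__":
    x = torch.randn(3)
    y = torch.randn(3)

    # Inside the mode, torch.add is rerouted to torch._refs.add.
    with TorchRefsMode():
        z = torch.add(x, y)

    # should_fallback_fn is called as fn(mode, orig_func, ref_func, args, kwargs);
    # returning True forces the original torch.* call for that op.
    def fall_back_on_add(mode, orig_func, func, args, kwargs):
        return orig_func is torch.add

    with TorchRefsMode(should_fallback_fn=fall_back_on_add):
        w = torch.add(x, y)  # falls back to the eager torch.add

    torch.testing.assert_close(z, w)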