_functorch.pyi

# mypy: allow-untyped-defs
from enum import Enum
from typing import Optional, Tuple

from torch import Tensor

# Defined in torch/csrc/functorch/init.cpp
def _set_dynamic_layer_keys_included(included: bool) -> None: ...
def get_unwrapped(tensor: Tensor) -> Tensor: ...
def is_batchedtensor(tensor: Tensor) -> bool: ...
def is_functionaltensor(tensor: Tensor) -> bool: ...
def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ...
def is_gradtrackingtensor(tensor: Tensor) -> bool: ...
def is_legacy_batchedtensor(tensor: Tensor) -> bool: ...
def maybe_get_bdim(tensor: Tensor) -> int: ...
def maybe_get_level(tensor: Tensor) -> int: ...
def maybe_current_level() -> Optional[int]: ...
def unwrap_if_dead(tensor: Tensor) -> Tensor: ...
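
# The is_*tensor / maybe_get_* helpers above query functorch's tensor wrappers
# (batched, grad-tracking, functional) without unwrapping them; maybe_get_level
# and maybe_get_bdim return -1 when the tensor carries no such wrapper.
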
def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ...
def current_level() -> int: ...
def count_jvp_interpreters() -> int: ...
def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ...
def set_single_level_autograd_function_allowed(allowed: bool) -> None: ...
def get_single_level_autograd_function_allowed() -> bool: ...
def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ...
def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ...
def _vmap_increment_nesting(batch_size: int, randomness: str) -> int: ...
def _vmap_decrement_nesting() -> int: ...
def _grad_increment_nesting() -> int: ...
def _grad_decrement_nesting() -> int: ...
def _jvp_increment_nesting() -> int: ...
def _jvp_decrement_nesting() -> int: ...
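
# Illustrative sketch (not part of the stub): torch.vmap-style callers pair the
# nesting functions above roughly as below; the real logic lives in
# torch/_functorch/vmap.py, and the variable names here are made up.
#
#   level = _vmap_increment_nesting(batch_size, "error")  # push a vmap layer
#   try:
#       batched = [_add_batch_dim(x, 0, level) for x in flat_inputs]
#       wrapped_out = func(*batched)
#       out, bdim = _unwrap_batched(wrapped_out, level)    # strip this level
#   finally:
#       _vmap_decrement_nesting()                          # pop the vmap layer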

# Defined in aten/src/ATen/functorch/Interpreter.h
class TransformType(Enum):
    Torch: TransformType = ...
    Vmap: TransformType = ...
    Grad: TransformType = ...
    Jvp: TransformType = ...
    Functionalize: TransformType = ...

class RandomnessType(Enum):
    Error: RandomnessType = ...
    Same: RandomnessType = ...
    Different: RandomnessType = ...
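
# RandomnessType mirrors the `randomness` keyword of torch.vmap
# ("error", "same", "different").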

class CInterpreter:
    def key(self) -> TransformType: ...
    def level(self) -> int: ...

class CGradInterpreterPtr:
    def __init__(self, interpreter: CInterpreter): ...
    def lift(self, tensor: Tensor) -> Tensor: ...
    def prevGradMode(self) -> bool: ...

class CJvpInterpreterPtr:
    def __init__(self, interpreter: CInterpreter): ...
    def lift(self, tensor: Tensor) -> Tensor: ...
    def prevFwdGradMode(self) -> bool: ...

class CFunctionalizeInterpreterPtr:
    def __init__(self, interpreter: CInterpreter): ...
    def key(self) -> TransformType: ...
    def level(self) -> int: ...
    def functionalizeAddBackViews(self) -> bool: ...

class CVmapInterpreterPtr:
    def __init__(self, interpreter: CInterpreter): ...
    def key(self) -> TransformType: ...
    def level(self) -> int: ...
    def batchSize(self) -> int: ...
    def randomness(self) -> RandomnessType: ...

class DynamicLayer: ...

def get_dynamic_layer_stack_depth() -> int: ...
def get_interpreter_stack() -> list[CInterpreter]: ...
def peek_interpreter_stack() -> CInterpreter: ...
def pop_dynamic_layer_stack() -> DynamicLayer: ...
def pop_dynamic_layer_stack_and_undo_to_depth(depth: int) -> None: ...
def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ...
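
# Illustrative sketch (not part of the stub): code such as
# torch/_functorch/pyfunctorch.py inspects the interpreter stack by peeking the
# top CInterpreter and re-wrapping it in the typed *InterpreterPtr classes above.
#
#   interp = peek_interpreter_stack()
#   if interp is not None and interp.key() == TransformType.Vmap:
#       vmap_interp = CVmapInterpreterPtr(interp)
#       vmap_interp.level(), vmap_interp.batchSize(), vmap_interp.randomness()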