# torch/mps/__init__.py
  1. # mypy: allow-untyped-defs
  2. r"""
  3. This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python.
  4. Metal is Apple's API for programming metal GPU (graphics processor unit). Using MPS means that increased
  5. performance can be achieved, by running work on the metal GPU(s).
  6. See https://developer.apple.com/documentation/metalperformanceshaders for more details.
  7. """
  8. from typing import Union
  9. import torch
  10. from .. import Tensor
# Forked-process guard: the real check is only bound in MPS-enabled builds,
# so fall back to a no-op reporting "not in a bad fork" elsewhere.
_is_in_bad_fork = getattr(torch._C, "_mps_is_in_bad_fork", lambda: False)
# Lazily-populated cache for the default MPS generator; starts as None and is
# filled on first use by _get_default_mps_generator() below.
_default_mps_generator: torch._C.Generator = None  # type: ignore[assignment]
# local helper function (not public or exported)
  14. def _get_default_mps_generator() -> torch._C.Generator:
  15. global _default_mps_generator
  16. if _default_mps_generator is None:
  17. _default_mps_generator = torch._C._mps_get_default_generator()
  18. return _default_mps_generator
  19. def device_count() -> int:
  20. r"""Returns the number of available MPS devices."""
  21. return int(torch._C._has_mps and torch._C._mps_is_available())
  22. def synchronize() -> None:
  23. r"""Waits for all kernels in all streams on a MPS device to complete."""
  24. return torch._C._mps_deviceSynchronize()
  25. def get_rng_state(device: Union[int, str, torch.device] = "mps") -> Tensor:
  26. r"""Returns the random number generator state as a ByteTensor.
  27. Args:
  28. device (torch.device or int, optional): The device to return the RNG state of.
  29. Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
  30. """
  31. return _get_default_mps_generator().get_state()
  32. def set_rng_state(
  33. new_state: Tensor, device: Union[int, str, torch.device] = "mps"
  34. ) -> None:
  35. r"""Sets the random number generator state.
  36. Args:
  37. new_state (torch.ByteTensor): The desired state
  38. device (torch.device or int, optional): The device to set the RNG state.
  39. Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
  40. """
  41. new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
  42. _get_default_mps_generator().set_state(new_state_copy)
  43. def manual_seed(seed: int) -> None:
  44. r"""Sets the seed for generating random numbers.
  45. Args:
  46. seed (int): The desired seed.
  47. """
  48. # the torch.mps.manual_seed() can be called from the global
  49. # torch.manual_seed() in torch/random.py. So we need to make
  50. # sure mps is available (otherwise we just return without
  51. # erroring out)
  52. if not torch._C._has_mps:
  53. return
  54. seed = int(seed)
  55. _get_default_mps_generator().manual_seed(seed)
  56. def seed() -> None:
  57. r"""Sets the seed for generating random numbers to a random number."""
  58. _get_default_mps_generator().seed()
  59. def empty_cache() -> None:
  60. r"""Releases all unoccupied cached memory currently held by the caching
  61. allocator so that those can be used in other GPU applications.
  62. """
  63. torch._C._mps_emptyCache()
  64. def set_per_process_memory_fraction(fraction) -> None:
  65. r"""Set memory fraction for limiting process's memory allocation on MPS device.
  66. The allowed value equals the fraction multiplied by recommended maximum device memory
  67. (obtained from Metal API device.recommendedMaxWorkingSetSize).
  68. If trying to allocate more than the allowed value in a process, it will raise an out of
  69. memory error in allocator.
  70. Args:
  71. fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction.
  72. .. note::
  73. Passing 0 to fraction means unlimited allocations
  74. (may cause system failure if out of memory).
  75. Passing fraction greater than 1.0 allows limits beyond the value
  76. returned from device.recommendedMaxWorkingSetSize.
  77. """
  78. if not isinstance(fraction, float):
  79. raise TypeError("Invalid type for fraction argument, must be `float`")
  80. if fraction < 0 or fraction > 2:
  81. raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~2")
  82. torch._C._mps_setMemoryFraction(fraction)
  83. def current_allocated_memory() -> int:
  84. r"""Returns the current GPU memory occupied by tensors in bytes.
  85. .. note::
  86. The returned size does not include cached allocations in
  87. memory pools of MPSAllocator.
  88. """
  89. return torch._C._mps_currentAllocatedMemory()
  90. def driver_allocated_memory() -> int:
  91. r"""Returns total GPU memory allocated by Metal driver for the process in bytes.
  92. .. note::
  93. The returned size includes cached allocations in MPSAllocator pools
  94. as well as allocations from MPS/MPSGraph frameworks.
  95. """
  96. return torch._C._mps_driverAllocatedMemory()
# Submodule re-exports: imported late so the functions above exist when the
# submodules import back into this package.
from . import profiler
from .event import Event

# Public API of torch.mps, consumed by `from torch.mps import *` and the docs.
__all__ = [
    "device_count",
    "get_rng_state",
    "manual_seed",
    "seed",
    "set_rng_state",
    "synchronize",
    "empty_cache",
    "set_per_process_memory_fraction",
    "current_allocated_memory",
    "driver_allocated_memory",
    "Event",
    "profiler",
]