# mypy: allow-untyped-defs
import contextlib
import warnings
from typing import Generator

import torch
from torch._C import default_generator


def set_rng_state(new_state: torch.Tensor) -> None:
    r"""Sets the random number generator state.

    .. note:: This function only works for CPU. For CUDA, please use
        :func:`torch.manual_seed`, which works for both CPU and CUDA.

    Args:
        new_state (torch.ByteTensor): The desired state
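
    A minimal round-trip sketch: restoring a previously captured state makes
    the next draws repeat.

    Example::

        >>> state = torch.get_rng_state()
        >>> a = torch.rand(3)
        >>> torch.set_rng_state(state)
        >>> b = torch.rand(3)  # identical to ``a``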
  13. """
  14. default_generator.set_state(new_state)
  15. def get_rng_state() -> torch.Tensor:
  16. r"""Returns the random number generator state as a `torch.ByteTensor`.
  17. .. note:: The returned state is for the default generator on CPU only.
  18. See also: :func:`torch.random.fork_rng`.
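
    A small illustration of the returned state's layout::

        >>> s = torch.get_rng_state()
        >>> s.dtype, s.ndim
        (torch.uint8, 1)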
  19. """
  20. return default_generator.get_state()
  21. def manual_seed(seed) -> torch._C.Generator:
  22. r"""Sets the seed for generating random numbers on all devices. Returns a
  23. `torch.Generator` object.
  24. Args:
  25. seed (int): The desired seed. Value must be within the inclusive range
  26. `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
  27. is raised. Negative inputs are remapped to positive values with the formula
            `0xffff_ffff_ffff_ffff + seed + 1` (i.e. the seed is reinterpreted
            as an unsigned 64-bit value).
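
    A brief reproducibility sketch::

        >>> _ = torch.manual_seed(0)
        >>> a = torch.rand(2)
        >>> _ = torch.manual_seed(0)
        >>> torch.equal(a, torch.rand(2))
        True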
  29. """
  30. seed = int(seed)
  31. import torch.cuda
  32. if not torch.cuda._is_in_bad_fork():
  33. torch.cuda.manual_seed_all(seed)
  34. import torch.mps
  35. if not torch.mps._is_in_bad_fork():
  36. torch.mps.manual_seed(seed)
  37. import torch.xpu
  38. if not torch.xpu._is_in_bad_fork():
  39. torch.xpu.manual_seed_all(seed)
  40. _seed_custom_device(seed)
  41. return default_generator.manual_seed(seed)
  42. def seed() -> int:
  43. r"""Sets the seed for generating random numbers to a non-deterministic
  44. random number on all devices. Returns a 64 bit number used to seed the RNG.
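
    A minimal sketch of recording the generated seed so a run can be replayed::

        >>> s = torch.seed()            # non-deterministic seed, applied on all devices
        >>> a = torch.rand(2)
        >>> _ = torch.manual_seed(s)    # re-seed with the recorded value
        >>> torch.equal(a, torch.rand(2))
        True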
  45. """
  46. seed = default_generator.seed()
  47. import torch.cuda
  48. if not torch.cuda._is_in_bad_fork():
  49. torch.cuda.manual_seed_all(seed)
  50. import torch.mps
  51. if not torch.mps._is_in_bad_fork():
  52. torch.mps.manual_seed(seed)
  53. import torch.xpu
  54. if not torch.xpu._is_in_bad_fork():
  55. torch.xpu.manual_seed_all(seed)
  56. _seed_custom_device(seed)
  57. return seed
  58. def _seed_custom_device(seed) -> None:
  59. r"""Sets the seed to generate random numbers for custom device.
  60. Args:
  61. seed (int): The desired seed.
  62. See [Note: support the custom device with privateuse1]
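
    A sketch of the duck-typed interface this helper looks for on the custom
    backend module; the class below is purely hypothetical, only the two
    attribute names match what is actually checked::

        >>> class _MyBackendModule:  # hypothetical custom device module
        ...     @staticmethod
        ...     def _is_in_bad_fork() -> bool:
        ...         return False
        ...     @staticmethod
        ...     def manual_seed_all(seed: int) -> None:
        ...         ...  # forward ``seed`` to every device of the backend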
  63. """
  64. seed = int(seed)
  65. custom_backend_name = torch._C._get_privateuse1_backend_name()
  66. if hasattr(torch, custom_backend_name):
  67. custom_device_mod = getattr(torch, custom_backend_name)
  68. _bad_fork_name = "_is_in_bad_fork"
  69. _seed_all_name = "manual_seed_all"
  70. if hasattr(custom_device_mod, _bad_fork_name) and hasattr(custom_device_mod, _seed_all_name):
  71. if not getattr(custom_device_mod, _bad_fork_name)():
  72. getattr(custom_device_mod, _seed_all_name)(seed)
  73. else:
            message = (
                f"Seeding the `{custom_backend_name}` device has no effect; please add the APIs "
                f"`{_bad_fork_name}` and `{_seed_all_name}` to the `{custom_backend_name}` device module."
            )
            warnings.warn(message, UserWarning, stacklevel=3)


def initial_seed() -> int:
    r"""Returns the initial seed for generating random numbers as a
    Python `int`.

    .. note:: The returned seed is for the default generator on CPU only.
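
    A short illustration::

        >>> _ = torch.manual_seed(42)
        >>> torch.initial_seed()
        42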
  81. """
  82. return default_generator.initial_seed()
  83. _fork_rng_warned_already = False
  84. @contextlib.contextmanager
  85. def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices", device_type="cuda") -> Generator:
  86. """
  87. Forks the RNG, so that when you return, the RNG is reset
  88. to the state that it was previously in.
  89. Args:
  90. devices (iterable of Device IDs): devices for which to fork
  91. the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
  92. on all devices, but will emit a warning if your machine has a lot
  93. of devices, since this function will run very slowly in that case.
  94. If you explicitly specify devices, this warning will be suppressed
  95. enabled (bool): if ``False``, the RNG is not forked. This is a convenience
  96. argument for easily disabling the context manager without having
  97. to delete it and unindent your Python code under it.
  98. device_type (str): device type str, default is `cuda`. As for custom device,
  99. see details in [Note: support the custom device with privateuse1]
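
    A minimal CPU-only sketch (``devices=[]`` forks just the CPU RNG, so no
    accelerator state is touched)::

        >>> before = torch.get_rng_state()
        >>> with torch.random.fork_rng(devices=[]):
        ...     _ = torch.rand(3)  # consumes RNG state only inside the block
        >>> torch.equal(before, torch.get_rng_state())
        True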
  100. """
  101. device_type = torch.device(device_type).type
  102. device_mod = getattr(torch, device_type, None)
  103. if device_mod is None:
  104. raise RuntimeError(f"torch has no module of `{device_type}`, you should register " +
  105. "a module by `torch._register_device_module`.")
  106. global _fork_rng_warned_already
  107. # Internal arguments:
  108. # _caller: the function which called fork_rng, which the user used
  109. # _devices_kw: the devices keyword of _caller
  110. if not enabled:
  111. yield
  112. return
  113. if devices is None:
  114. num_devices = device_mod.device_count()
  115. if num_devices > 1 and not _fork_rng_warned_already:
  116. message = (f"{device_type.upper()} reports that you have {num_devices} available devices, and "
  117. f"you have used {_caller} without explicitly specifying which devices are being used. "
  118. f"For safety, we initialize *every* {device_type.upper()} device by default, which can "
  119. f"be quite slow if you have a lot of {device_type.upper()}s. If you know that you are only"
  120. f" making use of a few {device_type.upper()} devices, set the environment variable "
  121. f"{device_type.upper()}_VISIBLE_DEVICES or the '{_devices_kw}' keyword argument of {_caller} "
  122. "with the set of devices you are actually using. For example, if you are using CPU only, "
  123. "set device.upper()_VISIBLE_DEVICES= or devices=[]; if you are using device 0 only, "
  124. f"set {device_type.upper()}_VISIBLE_DEVICES=0 or devices=[0]. To initialize all devices "
  125. f"and suppress this warning, set the '{_devices_kw}' keyword argument to "
  126. f"`range(torch.{device_type}.device_count())`.")
  127. warnings.warn(message)
  128. _fork_rng_warned_already = True
  129. devices = list(range(num_devices))
  130. else:
  131. # Protect against user passing us a generator; we need to traverse this
  132. # multiple times but a generator will be exhausted upon first traversal
  133. devices = list(devices)
  134. cpu_rng_state = torch.get_rng_state()
  135. device_rng_states = [device_mod.get_rng_state(device) for device in devices]
  136. try:
  137. yield
  138. finally:
  139. torch.set_rng_state(cpu_rng_state)
  140. for device, device_rng_state in zip(devices, device_rng_states):
  141. device_mod.set_rng_state(device_rng_state, device)