# mypy: allow-untyped-defs
import logging
import multiprocessing
import multiprocessing.connection
import os
import pickle
import signal
import sys
import tempfile
import time
import warnings
from typing import Optional

from . import _prctl_pr_set_pdeathsig  # type: ignore[attr-defined]

log = logging.getLogger(__name__)


class ProcessException(Exception):
    __slots__ = ["error_index", "error_pid"]

    def __init__(self, msg: str, error_index: int, pid: int):
        super().__init__(msg)
        self.msg = msg
        self.error_index = error_index
        self.pid = pid

    def __reduce__(self):
        return type(self), (self.msg, self.error_index, self.pid)


class ProcessRaisedException(ProcessException):
    """Exception raised when a process failed due to an exception raised by the code."""

    def __init__(
        self,
        msg: str,
        error_index: int,
        error_pid: int,
    ):
        super().__init__(msg, error_index, error_pid)


class ProcessExitedException(ProcessException):
    """Exception raised when a process failed due to signal or exited with a specific code."""

    __slots__ = ["exit_code"]

    def __init__(
        self,
        msg: str,
        error_index: int,
        error_pid: int,
        exit_code: int,
        signal_name: Optional[str] = None,
    ):
        super().__init__(msg, error_index, error_pid)
        self.exit_code = exit_code
        self.signal_name = signal_name

    def __reduce__(self):
        return (
            type(self),
            (self.msg, self.error_index, self.pid, self.exit_code, self.signal_name),
        )
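

# The ``__reduce__`` overrides above let these exceptions survive a pickle
# round trip, e.g. when they cross process boundaries (the default reduction
# would drop the extra constructor arguments). A minimal illustrative check,
# not part of the module:
#
#     import pickle
#     err = ProcessExitedException("died", error_index=0, error_pid=42, exit_code=1)
#     same = pickle.loads(pickle.dumps(err))
#     assert (same.error_index, same.pid, same.exit_code) == (0, 42, 1)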


def _wrap(fn, i, args, error_file):
    # prctl(2) is a Linux specific system call.
    # On other systems the following function call has no effect.
    # This is set to ensure that non-daemonic child processes can
    # terminate if their parent terminates before they do.
    _prctl_pr_set_pdeathsig(signal.SIGINT)
    try:
        fn(i, *args)
    except KeyboardInterrupt:
        pass  # SIGINT; Killed by parent, do nothing
    except Exception:
        # Propagate exception to parent process, keeping original traceback
        import traceback

        with open(error_file, "wb") as fh:
            pickle.dump(traceback.format_exc(), fh)
        sys.exit(1)
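

# ``_prctl_pr_set_pdeathsig`` is a thin native wrapper around prctl(2). On
# Linux it is roughly equivalent to this ctypes sketch (PR_SET_PDEATHSIG is
# constant 1 in <sys/prctl.h>; illustrative only, the real binding is in C):
#
#     import ctypes, signal
#     ctypes.CDLL("libc.so.6", use_errno=True).prctl(1, signal.SIGINT)
#
# With it set, the kernel sends the child SIGINT when the parent dies, and
# ``_wrap`` turns that into a quiet exit via its KeyboardInterrupt handler.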


class ProcessContext:
    def __init__(self, processes, error_files):
        self.error_files = error_files
        self.processes = processes
        self.sentinels = {
            process.sentinel: index for index, process in enumerate(processes)
        }

    def pids(self):
        return [int(process.pid) for process in self.processes]
    def join(self, timeout=None):
        r"""Join one or more processes within spawn context.

        Attempt to join one or more processes in this spawn context.
        If one of them exited with a non-zero exit status, this function
        kills the remaining processes and raises an exception with the cause
        of the first process exiting.

        Returns ``True`` if all processes have been joined successfully,
        ``False`` if there are more processes that need to be joined.

        Args:
            timeout (float): Wait this long before giving up on waiting.
        """
        # Ensure this function can be called even when we're done.
        if len(self.sentinels) == 0:
            return True

        # Wait for any process to fail or all of them to succeed.
        ready = multiprocessing.connection.wait(
            self.sentinels.keys(),
            timeout=timeout,
        )

        error_index = None
        for sentinel in ready:
            index = self.sentinels.pop(sentinel)
            process = self.processes[index]
            process.join()
            if process.exitcode != 0:
                error_index = index
                break

        # Return if there was no error.
        if error_index is None:
            # Return whether or not all processes have been joined.
            return len(self.sentinels) == 0

        # Assume failure. Terminate processes that are still alive:
        # try SIGTERM first, then SIGKILL if a process isn't going down.
        # Python signal handling is limited to the main thread, and if that
        # thread is stuck in C/C++ code it never gets a chance to handle the
        # signal. We have seen processes get stuck not handling SIGTERM for
        # exactly this reason.
        timeout: int = 30
        for process in self.processes:
            if process.is_alive():
                log.warning("Terminating process %s via signal SIGTERM", process.pid)
                process.terminate()
        end = time.monotonic() + timeout
        for process in self.processes:
            time_to_wait = max(0, end - time.monotonic())
            process.join(time_to_wait)
        for process in self.processes:
            if process.is_alive():
                log.warning(
                    "Unable to shut down process %s via SIGTERM, forcefully killing it via SIGKILL",
                    process.pid,
                )
                process.kill()
            process.join()

        # The error file will only have been created if the process crashed.
        failed_process = self.processes[error_index]
        if not os.access(self.error_files[error_index], os.R_OK):
            exitcode = self.processes[error_index].exitcode
            if exitcode < 0:
                try:
                    name = signal.Signals(-exitcode).name
                except ValueError:
                    name = f"<Unknown signal {-exitcode}>"
                raise ProcessExitedException(
                    "process %d terminated with signal %s" % (error_index, name),
                    error_index=error_index,
                    error_pid=failed_process.pid,
                    exit_code=exitcode,
                    signal_name=name,
                )
            else:
                raise ProcessExitedException(
                    "process %d terminated with exit code %d" % (error_index, exitcode),
                    error_index=error_index,
                    error_pid=failed_process.pid,
                    exit_code=exitcode,
                )

        with open(self.error_files[error_index], "rb") as fh:
            original_trace = pickle.load(fh)
        msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
        msg += original_trace
        raise ProcessRaisedException(msg, error_index, failed_process.pid)
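

# Hedged usage sketch (illustrative, not part of the module): with
# ``join=False``, ``start_processes`` below returns this context, and the
# caller can poll ``join`` with a timeout, doing housekeeping between polls:
#
#     ctx = start_processes(fn, nprocs=4, join=False)
#     while not ctx.join(timeout=1.0):
#         pass  # e.g. check a watchdog, flush logs, ...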


class SpawnContext(ProcessContext):
    def __init__(self, processes, error_files):
        warnings.warn("SpawnContext was renamed to ProcessContext in the 1.4 release.")
        super().__init__(processes, error_files)


# Note: [start_processes]
# mp.start_processes handles both start_method='spawn' and 'fork'. It's meant
# to be a more generalized API than mp.spawn. Currently we only document
# mp.spawn, as it's the CUDA-compatible start_method. However, in environments
# like IPython notebooks, 'fork' works better than 'spawn'. Every helper
# function we created for mp.spawn is general enough, and backends like XLA
# can reuse them in Colab notebooks as well (see the usage sketch after
# ``start_processes`` below). For now we only add the API itself; we can
# consider adding it to the documentation as needed in the future.
def start_processes(
    fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"
):
    mp = multiprocessing.get_context(start_method)
    error_files = []
    processes = []
    for i in range(nprocs):
        # Each process is assigned a file to write tracebacks to. We
        # use the file being non-empty to indicate an exception
        # occurred (vs an expected shutdown). Note: this previously
        # used a multiprocessing.Queue but that can be prone to
        # deadlocks, so we went with a simpler solution for a one-shot
        # message between processes.
        tf = tempfile.NamedTemporaryFile(
            prefix="pytorch-errorfile-", suffix=".pickle", delete=False
        )
        tf.close()
        os.unlink(tf.name)
        process = mp.Process(
            target=_wrap,
            args=(fn, i, args, tf.name),
            daemon=daemon,
        )
        process.start()
        error_files.append(tf.name)
        processes.append(process)

    context = ProcessContext(processes, error_files)
    if not join:
        return context

    # Loop on join until it returns True or raises an exception.
    while not context.join():
        pass
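

# As noted above (Note: [start_processes]), this API also supports
# start_method='fork', which tends to work better in IPython/Colab-style
# notebooks. A hedged sketch (``worker`` is a hypothetical top-level function):
#
#     def worker(i, greeting):
#         print(f"process {i}: {greeting}")
#
#     start_processes(worker, args=("hello",), nprocs=2, start_method="fork")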


def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
    r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.

    If one of the processes exits with a non-zero exit status, the
    remaining processes are killed and an exception is raised with the
    cause of termination. If an exception was caught in a child process,
    it is forwarded and its traceback is included in the exception
    raised in the parent process.

    Args:
        fn (function): Function is called as the entrypoint of the
            spawned process. This function must be defined at the top
            level of a module so it can be pickled and spawned. This
            is a requirement imposed by multiprocessing.

            The function is called as ``fn(i, *args)``, where ``i`` is
            the process index and ``args`` is the passed-through tuple
            of arguments.

        args (tuple): Arguments passed to ``fn``.
        nprocs (int): Number of processes to spawn.
        join (bool): Perform a blocking join on all processes.
        daemon (bool): The spawned processes' daemon flag. If set to True,
            daemonic processes will be created.
        start_method (str): (deprecated) this method will always use ``spawn``
            as the start method. To use a different start method
            use ``start_processes()``.

    Returns:
        None if ``join`` is ``True``,
        :class:`~ProcessContext` if ``join`` is ``False``
    """
    if start_method != "spawn":
        msg = (
            f"This method only supports start_method=spawn (got: {start_method}).\n"
            "To use a different start_method use:\n\t\t"
            " torch.multiprocessing.start_processes(...)"
        )
        warnings.warn(msg, FutureWarning, stacklevel=2)
    return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
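

# Minimal end-to-end sketch of the public API (illustrative only; ``train``
# and its arguments are hypothetical). The ``__main__`` guard matters because
# the ``spawn`` start method re-imports the main module in each child:
#
#     import torch.multiprocessing as mp
#
#     def train(i, nprocs):
#         print(f"worker {i}/{nprocs}")
#
#     if __name__ == "__main__":
#         mp.spawn(train, args=(2,), nprocs=2)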