# mypy: allow-untyped-defs
__all__ = ["shutdown", "get_worker_info", "remote", "rpc_sync",
           "rpc_async", "RRef", "AllGatherStates", "method_factory", "new_method"]

import collections
import contextlib
import functools
import inspect
import logging
import threading
from typing import Dict, Generic, TypeVar, Set, Any, TYPE_CHECKING

import torch
from torch.futures import Future
from torch._C._distributed_rpc import (
    PyRRef,
    RemoteProfilerManager,
    WorkerInfo,
    TensorPipeAgent,
    get_rpc_timeout,
    _cleanup_python_rpc_handler,
    _delete_all_user_and_unforked_owner_rrefs,
    _destroy_rref_context,
    _get_current_rpc_agent,
    _invoke_remote_builtin,
    _invoke_remote_python_udf,
    _invoke_remote_torchscript,
    _invoke_rpc_builtin,
    _invoke_rpc_python_udf,
    _invoke_rpc_torchscript,
    _is_current_rpc_agent_set,
    _reset_current_rpc_agent,
    _set_and_start_rpc_agent,
)

from .internal import (
    PythonUDF,
    RPCExecMode,
    _internal_rpc_pickler,
    _build_rpc_profiling_key,
)
from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT
from ._utils import _group_membership_management, _update_group_membership

logger = logging.getLogger(__name__)
# NB: Ignoring RRef leaks during shutdown. Without this, applications have to
# make sure there are no references to any RRef in the application code and
# that Python GC has done its job to delete those RRefs. This could result in
# bad debugging experiences, especially for large applications. Therefore, by
# default, we are going to ignore RRef leaks during shutdown. This is usually
# fine as shutdown means applications have done training and no longer care
# about states.
#
# To enable RRef leak checking, set this _ignore_rref_leak to False
_ignore_rref_leak = True

_default_pickler = _internal_rpc_pickler


@contextlib.contextmanager
def _use_rpc_pickler(rpc_pickler):
    r"""
    rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler
    """
    global _default_pickler
    _default_pickler = rpc_pickler
    try:
        yield
    finally:
        _default_pickler = _internal_rpc_pickler
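

# A minimal usage sketch of the context manager above: `_CustomPickler` is a
# hypothetical subclass of `.internal._InternalRPCPickler`, named here only to
# illustrate the pattern; only the swap-and-restore behavior comes from the
# code above.
#
#     with _use_rpc_pickler(_CustomPickler()):
#         # Python UDF RPCs issued here are serialized with _CustomPickler ...
#         rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))
#     # ... and the default pickler is restored on exit, even on error.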


def _require_initialized(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not _is_current_rpc_agent_set():
            raise RuntimeError(
                "RPC has not been initialized. Call "
                "torch.distributed.rpc.init_rpc first."
            )
        return func(*args, **kwargs)

    return wrapper


class AllGatherStates:
    def __init__(self):
        # Each `gathered_objects` is an empty dict at the beginning.
        # The leader worker is elected as the first worker in a sorted worker
        # name list. Whenever there is a worker entering `_all_gather()`, it
        # runs `_gather_to_leader()` on the leader to add its own name and
        # data obj to this dict. The leader also adds its own name to the dict
        # on calling `_all_gather()`.
        # Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader
        # will broadcast the gathered dict to all follower workers and set their
        # `gathered_objects` field and the `proceed_signal` field.
        self.gathered_objects = {}
        # All workers wait on this signal until they have received all
        # gathered objects.
        self.proceed_signal = threading.Event()


# States used by `def _all_gather()`.
# `_ALL_WORKER_NAMES` is initialized on initializing RPC layer.
_ALL_WORKER_NAMES: Set[Any] = set()
_all_gather_dict_lock = threading.RLock()
_all_gather_sequence_id: Dict[str, int] = {}
_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict(AllGatherStates)
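# For illustration, given how `_all_gather()` builds its keys below: with
# workers {"worker0", "worker1"}, the concatenated key is "worker0worker1",
# and successive all-gather rounds over that same worker set use sequence ids
# "worker0worker10", "worker0worker11", "worker0worker12", ...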


def _init_rpc_states(agent):
    worker_infos = agent.get_worker_infos()
    global _ALL_WORKER_NAMES
    _ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos}

    # NB: backend implementation might have already set the rpc_agent.
    if not _is_current_rpc_agent_set():
        _set_and_start_rpc_agent(agent)


def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None):
    with _all_gather_dict_lock:
        if not worker_names:
            worker_names = _ALL_WORKER_NAMES
            assert (
                worker_name in worker_names
            ), f"{worker_name} is not expected by leader."
        states = _all_gather_sequence_id_to_states[sequence_id]
        assert (
            worker_name not in states.gathered_objects
        ), f"{worker_name} reported intent sequence id {sequence_id} twice. "
        states.gathered_objects[worker_name] = obj
        if worker_names == set(states.gathered_objects.keys()):
            states.proceed_signal.set()


def _broadcast_to_followers(sequence_id, objects_map):
    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states[sequence_id]

        assert (
            not states.proceed_signal.is_set()
        ), f"Termination signal sequence id {sequence_id} got set twice."
        states.gathered_objects = objects_map
        states.proceed_signal.set()


_thread_local_var = threading.local()


@contextlib.contextmanager
def _wait_all():
    r"""
    A context manager that collects all futures returned by ``rpc_async`` and
    waits on them at the context manager's exit, relieving the user of the
    need to explicitly call wait.

    Example::
        >>> # xdoctest: +SKIP("distributed")
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> with rpc._wait_all():
        >>>    fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>>    fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>> #fut_1 and fut_2 are waited on
    """
    _thread_local_var.future_list = []
    try:
        yield
    finally:
        try:
            torch.futures.wait_all(_thread_local_var.future_list)
        finally:
            del _thread_local_var.future_list


@_require_initialized
def _all_gather(obj, worker_names=None, timeout: float = UNSET_RPC_TIMEOUT):
    r"""
    This is similar to torch.distributed.all_gather(), but it uses RPC. It
    picks the worker with the smallest name (alphabetic order) as the leader.
    Then all followers send their data ``obj`` to the leader. After the leader
    has received all objects, it broadcasts the results back to all followers.
    This function blocks until all workers have received the gathered results.
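
    Example (minimal sketch of this internal helper; assumes two workers named
    "worker0" and "worker1" initialized via ``init_rpc``, and a local object
    ``my_obj`` to contribute)::

        >>> # xdoctest: +SKIP("distributed")
        >>> import torch.distributed.rpc as rpc
        >>> # Run the same call on every worker; each receives the full dict.
        >>> gathered = rpc.api._all_gather(my_obj)
        >>> # e.g. {"worker0": <obj from worker0>, "worker1": <obj from worker1>}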
  158. """
  159. if not worker_names:
  160. assert (
  161. _ALL_WORKER_NAMES is not None
  162. ), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`."
  163. worker_names = _ALL_WORKER_NAMES
  164. leader_name = min(worker_names)
  165. self_name = _get_current_rpc_agent().get_worker_info().name
  166. with _all_gather_dict_lock:
  167. concat_names = "".join(sorted(worker_names))
  168. sequence_num = _all_gather_sequence_id.get(concat_names, 0)
  169. _all_gather_sequence_id[concat_names] = sequence_num + 1
  170. sequence_id = concat_names + str(sequence_num)
  171. is_leader = leader_name == self_name
  172. if timeout == UNSET_RPC_TIMEOUT:
  173. # Timeout is specified by agent for RPC calls
  174. rpc_timeout = get_rpc_timeout()
  175. # No timeout for signal
  176. signal_timeout = None
  177. elif timeout == DEFAULT_SHUTDOWN_TIMEOUT:
  178. # No timeout for RPC
  179. rpc_timeout = timeout
  180. # No timeout for signal
  181. signal_timeout = None
  182. else:
  183. # Signal and RPC timeout use the same timeout
  184. signal_timeout = rpc_timeout = timeout
  185. # Phase 1: Followers send it's object to the leader
  186. if is_leader:
  187. _gather_to_leader(sequence_id, self_name, obj, worker_names)
  188. else:
  189. rpc_sync(
  190. leader_name,
  191. _gather_to_leader,
  192. args=(sequence_id, self_name, obj, worker_names),
  193. timeout=rpc_timeout,
  194. )
  195. with _all_gather_dict_lock:
  196. states = _all_gather_sequence_id_to_states[sequence_id]
  197. # Timeout is either set by function parameter or None (which is indefinite)
  198. states.proceed_signal.wait(timeout=signal_timeout)
  199. # Phase 2: Leader broadcast gathered results to all followers
  200. # Leader's signal is the first to be unblocked, after receiving all
  201. # followers' data objects.
  202. if is_leader:
  203. worker_name_to_response_future_dict = {}
  204. for follower_name in worker_names - {leader_name}:
  205. fut = rpc_async(
  206. follower_name,
  207. _broadcast_to_followers,
  208. args=(sequence_id, states.gathered_objects),
  209. timeout=rpc_timeout
  210. )
  211. worker_name_to_response_future_dict[follower_name] = fut
  212. errors = []
  213. for follower_name, fut in worker_name_to_response_future_dict.items():
  214. try:
  215. fut.wait()
  216. except RuntimeError as ex:
  217. errors.append((follower_name, ex))
  218. if errors:
  219. raise RuntimeError(
  220. f"Followers {[e[0] for e in errors]} timed out in _all_gather "
  221. f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}"
  222. )
  223. # Clean up for the states using the sequence_id
  224. with _all_gather_dict_lock:
  225. states = _all_gather_sequence_id_to_states.pop(sequence_id)
  226. return states.gathered_objects


@_require_initialized
def _barrier(worker_names):
    r"""
    Synchronizes local and remote RPC processes.

    This will block until all local and remote RPC processes specified under worker_names
    reach this method to wait for all outstanding work to complete.

    Args:
        worker_names (List[str]): The set of workers to synchronize.
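
    Example (minimal sketch of this internal helper; assumes two workers named
    "worker0" and "worker1" initialized via ``init_rpc``)::

        >>> # xdoctest: +SKIP("distributed")
        >>> import torch.distributed.rpc as rpc
        >>> # Call on every participating worker; each blocks until all arrive.
        >>> rpc.api._barrier(["worker0", "worker1"])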
  235. """
  236. try:
  237. _all_gather(None, set(worker_names))
  238. except RuntimeError as ex:
  239. logger.error(
  240. "Failed to complete barrier, got error %s", ex
  241. )


@_require_initialized
def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
    r"""
    Block until all local and remote RPC processes reach this method and wait
    for all outstanding work to complete. Every RPC process must call this
    method before exit to perform a graceful shutdown. This should be used to
    terminate the RPC framework, and there is no guarantee that the RPC
    framework will work after this method returns.
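
    Example (minimal sketch; assumes two workers named "worker0" and "worker1"
    initialized via ``init_rpc`` and that all RPC work has been issued)::

        >>> # xdoctest: +SKIP("distributed")
        >>> import torch.distributed.rpc as rpc
        >>> # On every worker:
        >>> rpc.api._wait_all_workers()
        >>> rpc.shutdown(graceful=False)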
  250. """
  251. try:
  252. _all_gather(None, timeout=timeout)
  253. except RuntimeError as ex:
  254. logger.error(
  255. "Failed to respond to 'Shutdown Proceed' in time, got error %s", ex
  256. )
  257. raise ex


@_require_initialized
def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):
    r"""
    Perform a shutdown of the RPC agent, and then destroy the RPC agent. This
    stops the local agent from accepting outstanding requests, and shuts
    down the RPC framework by terminating all RPC threads. If ``graceful=True``,
    this will block until all local and remote RPC processes reach this method
    and wait for all outstanding work to complete. Otherwise, if
    ``graceful=False``, this is a local shutdown, and it does not wait for other
    RPC processes to reach this method.

    .. warning::
        For :class:`~torch.futures.Future` objects returned by
        :meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not
        be called after ``shutdown()``.

    Args:
        graceful (bool): Whether to do a graceful shutdown or not. If True,
                         this will 1) wait until there are no pending system
                         messages for ``UserRRefs`` and delete them; 2) block
                         until all local and remote RPC processes have reached
                         this method and wait for all outstanding work to
                         complete.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> # do some work
        >>> result = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(1), 1))
        >>> # ready to shutdown
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> # wait for worker 0 to finish work, and then shutdown.
        >>> rpc.shutdown()
    """
    if graceful:
        try:
            agent = _get_current_rpc_agent()
            if not isinstance(agent, TensorPipeAgent) or agent.is_static_group:
                _wait_all_workers(timeout)
                _delete_all_user_and_unforked_owner_rrefs()
                agent.join(shutdown=True, timeout=timeout)
            else:
                # This is a dynamic group so we need to grab the token for the operation
                my_worker_info = agent.get_worker_info()
                my_name = my_worker_info.name
                with _group_membership_management(agent.store, my_name, False):
                    all_worker_infos = agent.get_worker_infos()
                    for worker in all_worker_infos:
                        if worker.name != my_name:
                            rpc_sync(worker.name, _update_group_membership, args=(my_worker_info, [], {}, False))
                    agent.join(shutdown=True, timeout=timeout)
        finally:
            # In case of errors, continue to complete the local shutdown.
            _finalize_shutdown()
    else:
        _finalize_shutdown()


def _finalize_shutdown():
    try:
        # This raises a `TORCH_CHECK()` exception on RRef leak detected.
        _destroy_rref_context(_ignore_rref_leak)
    finally:
        _get_current_rpc_agent().shutdown()

        # Clean up the Python RPC handler in shutdown(); see comments in
        # PythonRpcHandler::cleanup(). Call it from the Python API because
        # cleanup() has a Python dependency: it assumes the Python interpreter
        # exists.
        # Regardless of whether an RRef leak exception is raised, this clean-up
        # code must run to avoid a destruction segfault in Python 3.5.
        #
        # future.wait() should not be called after shutdown().
        # pythonRpcHandler is cleaned up in shutdown(); after shutdown(), Python
        # objects returned from an RPC Python call can not be resolved.
        _cleanup_python_rpc_handler()
        _reset_current_rpc_agent()


@_require_initialized
def get_worker_info(worker_name=None):
    r"""
    Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name.
    Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an
    expensive string on every invocation.

    Args:
        worker_name (str): the string name of a worker. If ``None``, return the
                           id of the current worker. (default ``None``)

    Returns:
        :class:`~torch.distributed.rpc.WorkerInfo` instance for the given
        ``worker_name`` or :class:`~torch.distributed.rpc.WorkerInfo` of the
        current worker if ``worker_name`` is ``None``.
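
    Example (minimal sketch; assumes two workers named "worker0" and "worker1"
    initialized via ``init_rpc``)::

        >>> # xdoctest: +SKIP("distributed")
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> info = rpc.get_worker_info("worker1")
        >>> # Reuse the WorkerInfo instead of passing the string name each time.
        >>> ret = rpc.rpc_sync(info, torch.add, args=(torch.ones(2), 1))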
  355. """
  356. if worker_name is not None:
  357. return _get_current_rpc_agent().get_worker_info(worker_name)
  358. else:
  359. return _get_current_rpc_agent().get_worker_info()
  360. def _to_worker_info(to):
  361. if isinstance(to, WorkerInfo):
  362. return to
  363. elif isinstance(to, (str, int)):
  364. return get_worker_info(to)
  365. else:
  366. raise ValueError(f"Cannot get WorkerInfo from name {to}")
  367. def _rref_typeof_on_owner(rref, blocking: bool = True):
  368. rref_type = type(rref.local_value())
  369. if blocking:
  370. return rref_type
  371. else:
  372. # Wrap result into a completed Future. This is so that if blocking=`False`
  373. # is specified, we return a future regardless of if this call is on user
  374. # or owner.
  375. future = Future[type]()
  376. future.set_result(rref_type)
  377. return future
  378. def _rref_typeof_on_user(rref, timeout: float = UNSET_RPC_TIMEOUT, blocking: bool = True):
  379. fut = rpc_async(
  380. rref.owner(),
  381. _rref_typeof_on_owner,
  382. args=(rref,),
  383. timeout=timeout
  384. )
  385. if blocking:
  386. return fut.wait()
  387. else:
  388. return fut


T = TypeVar("T")
GenericWithOneTypeVar = Generic[T]


if TYPE_CHECKING:

    class RRef(PyRRef[T], Generic[T]):
        pass

else:
    try:
        # Combine the implementation class and the type class.
        class RRef(PyRRef, Generic[T]):
            pass

    except TypeError:
        # TypeError: metaclass conflict: the metaclass of a derived class
        # must be a (non-strict) subclass of the metaclasses of all its bases
        # Mypy doesn't understand __class__ (mypy bug #4177)
        class RRefMeta(PyRRef.__class__, GenericWithOneTypeVar.__class__):  # type: ignore[name-defined, misc, valid-type]
            pass

        # Combine the implementation class and the type class.
        # Types for classes expecting a certain generic parameter (mypy bug #7791)
        class RRef(PyRRef, GenericWithOneTypeVar, metaclass=RRefMeta):  # type: ignore[misc, no-redef, valid-type]
            pass
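

# A minimal typing sketch: because `RRef` is generic, annotations can carry the
# element type. This assumes an initialized RPC group with a worker named
# "worker1"; the annotation is illustrative and not required by the API.
#
#     rref: RRef[torch.Tensor] = remote(
#         "worker1", torch.add, args=(torch.ones(2), 1)
#     )
#     result: torch.Tensor = rref.to_here()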


# Install docstrings from `PyRRef` to `RRef`.
#
# This is needed because pybind11 generates the parameter
# `self` as type `rpc.PyRRef`, so a `:inherited-members:`
# under `.. autoclass:: RRef` does not work.
# We have to do the following process to replace `rpc.PyRRef` with `rpc.RRef`.
#
def method_factory(method_name, docstring):
    def method(self, *args, **kwargs):
        return getattr(super(RRef, self), method_name)(*args, **kwargs)

    if method.__doc__:
        method.__doc__ = docstring
    return method


for method_name, method in inspect.getmembers(PyRRef):
    # Ignore magic methods, except "__str__".
    if method_name.startswith("_") and method_name != "__str__":
        continue

    # Get pybind11 generated docstring.
    # It's like,
    """
    to_here(self: torch.distributed.rpc.PyRRef, timeout: float=-1.0) -> object

        Blocking call that copies the value of the RRef from the owner
        to the local node and returns it. If the current node is the
        owner, returns a reference to the local value.
    """
    docstring = getattr(method, "__doc__", None)
    assert docstring is not None, "RRef user-facing methods should all have docstrings."

    # Do surgery on pybind11 generated docstrings.
    docstring = docstring.replace("torch.distributed.rpc.PyRRef", "torch.distributed.rpc.RRef")

    # Attach user-facing RRef method with modified docstring.
    new_method = method_factory(method_name, docstring)
    setattr(RRef, method_name, new_method)


@_require_initialized
def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a remote call to run ``func`` on worker ``to`` and return an
    :class:`~torch.distributed.rpc.RRef` to the result value immediately.
    Worker ``to`` will be the owner of the returned
    :class:`~torch.distributed.rpc.RRef`, and the worker calling ``remote`` is
    a user. The owner manages the global reference count of its
    :class:`~torch.distributed.rpc.RRef`, and the owner
    :class:`~torch.distributed.rpc.RRef` is only destructed when globally there
    are no living references to it.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
        func (Callable): a callable function, such as Python callables, builtin
                         operators (e.g. :meth:`~torch.add`) and annotated
                         TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
                       invocation.
        timeout (float, optional): timeout in seconds for this remote call. If the
                                   creation of this
                                   :class:`~torch.distributed.rpc.RRef` on worker
                                   ``to`` is not successfully processed on this
                                   worker within this timeout, then the next time
                                   there is an attempt to use the RRef (such as
                                   ``to_here()``), a timeout will be raised
                                   indicating this failure. A value of 0 indicates
                                   an infinite timeout, i.e. a timeout error will
                                   never be raised. If not provided, the default
                                   value set during initialization or with
                                   ``_set_rpc_timeout`` is used.

    Returns:
        A user :class:`~torch.distributed.rpc.RRef` instance to the result
        value. Use the blocking API :meth:`torch.distributed.rpc.RRef.to_here`
        to retrieve the result value locally.

    .. warning ::
        The ``remote`` API does not copy storages of argument tensors until
        sending them over the wire, which could be done by a different thread
        depending on the RPC backend type. The caller should make sure that the
        contents of those tensors stay intact until the returned RRef is
        confirmed by the owner, which can be checked using the
        :meth:`torch.distributed.rpc.RRef.confirmed_by_owner` API.

    .. warning ::
        Errors such as timeouts for the ``remote`` API are handled on a
        best-effort basis. This means that when remote calls initiated by
        ``remote`` fail, such as with a timeout error, we take a best-effort
        approach to error handling. This means that errors are handled and set
        on the resulting RRef on an asynchronous basis. If the RRef has not been
        used by the application before this handling (such as ``to_here`` or
        fork call), then future uses of the ``RRef`` will appropriately raise
        errors. However, it is possible that the user application will use the
        ``RRef`` before the errors are handled. In this case, errors may not be
        raised as they have not yet been handled.

    Example::

        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
        >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
        >>> x = rref1.to_here() + rref2.to_here()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(tensor: torch.Tensor, scalar: int):
        >>>    return torch.add(tensor, scalar)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> rref = rpc.remote("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> rref.to_here()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_remote")
    qualified_name = torch.jit._builtins._find_builtin(func)
    dst_worker_info = _to_worker_info(to)
    should_profile = _get_should_profile()

    ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, RPCExecMode.REMOTE, dst_worker_info)

    with ctx_manager as rf:
        args = args if args else ()
        kwargs = kwargs if kwargs else {}

        is_async_exec = hasattr(func, "_wrapped_async_rpc_function")

        if is_async_exec:
            wrapped = func._wrapped_async_rpc_function
            if isinstance(wrapped, torch.jit.ScriptFunction):
                func = wrapped

        if qualified_name is not None:
            rref = _invoke_remote_builtin(dst_worker_info, qualified_name, timeout, *args, **kwargs)
        elif isinstance(func, torch.jit.ScriptFunction):
            rref = _invoke_remote_torchscript(
                dst_worker_info.name,
                torch._jit_internal._qualified_name(func),
                timeout,
                is_async_exec,
                *args,
                **kwargs,
            )
        else:
            (pickled_python_udf, tensors) = _default_pickler.serialize(
                PythonUDF(func, args, kwargs)
            )
            rref = _invoke_remote_python_udf(
                dst_worker_info,
                pickled_python_udf,
                tensors,
                timeout,
                is_async_exec
            )
        # attach profiling information
        if should_profile:
            assert torch.autograd._profiler_enabled()
            assert rf is not None
            fut = rf._call_end_callbacks_on_future(rref._get_future())
            rref._set_profiling_future(fut)

    return rref


def _invoke_rpc(to, func, rpc_type, args=None, kwargs=None, rpc_timeout: float = UNSET_RPC_TIMEOUT):
    if not callable(func):
        raise TypeError("function should be callable.")
    qualified_name = torch.jit._builtins._find_builtin(func)
    dst_worker_info = _to_worker_info(to)

    should_profile = _get_should_profile()

    ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info)

    with ctx_manager as rf:
        args = args if args else ()
        kwargs = kwargs if kwargs else {}

        is_async_exec = hasattr(func, "_wrapped_async_rpc_function")

        if is_async_exec:
            wrapped = func._wrapped_async_rpc_function
            if isinstance(wrapped, torch.jit.ScriptFunction):
                func = wrapped

        if qualified_name is not None:
            fut = _invoke_rpc_builtin(
                dst_worker_info,
                qualified_name,
                rpc_timeout,
                *args,
                **kwargs
            )
        elif isinstance(func, torch.jit.ScriptFunction):
            fut = _invoke_rpc_torchscript(
                dst_worker_info.name,
                torch._jit_internal._qualified_name(func),
                args,
                kwargs,
                rpc_timeout,
                is_async_exec
            )
        else:
            (pickled_python_udf, tensors) = _default_pickler.serialize(
                PythonUDF(func, args, kwargs)
            )
            fut = _invoke_rpc_python_udf(
                dst_worker_info,
                pickled_python_udf,
                tensors,
                rpc_timeout,
                is_async_exec
            )
        if should_profile:
            assert torch.autograd._profiler_enabled()
            assert rf is not None
            # Schedule profiling callbacks to run when the future completes.
            # This returns a future that is completed when the original future
            # completes and the profiling callbacks have been completed as well,
            # to guarantee that fut.wait() completes the profiling. This new
            # future will contain the same value as the original future.
            fut = rf._call_end_callbacks_on_future(fut)
    return fut


@_require_initialized
def rpc_sync(to, func, args=None, kwargs=None, timeout: float = UNSET_RPC_TIMEOUT):
    r"""
    Make a blocking RPC call to run function ``func`` on worker ``to``. RPC
    messages are sent and received in parallel to execution of Python code. This
    method is thread-safe.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
        func (Callable): a callable function, such as Python callables, builtin
                         operators (e.g. :meth:`~torch.add`) and annotated
                         TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
                       invocation.
        timeout (float, optional): timeout in seconds to use for this RPC. If
                                   the RPC does not complete in this amount of
                                   time, an exception indicating it has
                                   timed out will be raised. A value of 0
                                   indicates an infinite timeout, i.e. a timeout
                                   error will never be raised. If not provided,
                                   the default value set during initialization
                                   or with ``_set_rpc_timeout`` is used.

    Returns:
        Returns the result of running ``func`` with ``args`` and ``kwargs``.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3))
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(tensor: torch.Tensor, scalar: int):
        >>>    return torch.add(tensor, scalar)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> ret = rpc.rpc_sync("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_sync")
    fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout)
    return fut.wait()


@_require_initialized
def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a non-blocking RPC call to run function ``func`` on worker ``to``. RPC
    messages are sent and received in parallel to execution of Python code. This
    method is thread-safe. This method will immediately return a
    :class:`~torch.futures.Future` that can be awaited on.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
        func (Callable): a callable function, such as Python callables, builtin
                         operators (e.g. :meth:`~torch.add`) and annotated
                         TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
                       invocation.
        timeout (float, optional): timeout in seconds to use for this RPC. If
                                   the RPC does not complete in this amount of
                                   time, an exception indicating it has
                                   timed out will be raised. A value of 0
                                   indicates an infinite timeout, i.e. a timeout
                                   error will never be raised. If not provided,
                                   the default value set during initialization
                                   or with ``_set_rpc_timeout`` is used.

    Returns:
        Returns a :class:`~torch.futures.Future` object that can be waited
        on. When completed, the return value of ``func`` on ``args`` and
        ``kwargs`` can be retrieved from the :class:`~torch.futures.Future`
        object.

    .. warning ::
        Using GPU tensors as arguments or return values of ``func`` is not
        supported since we don't support sending GPU tensors over the wire. You
        need to explicitly copy GPU tensors to CPU before using them as
        arguments or return values of ``func``.

    .. warning ::
        The ``rpc_async`` API does not copy storages of argument tensors until
        sending them over the wire, which could be done by a different thread
        depending on the RPC backend type. The caller should make sure that the
        contents of those tensors stay intact until the returned
        :class:`~torch.futures.Future` completes.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> fut1 = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 3))
        >>> fut2 = rpc.rpc_async("worker1", min, args=(1, 2))
        >>> result = fut1.wait() + fut2.wait()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(tensor: torch.Tensor, scalar: int):
        >>>    return torch.add(tensor, scalar)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> fut = rpc.rpc_async("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> ret = fut.wait()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_async")
    fut = _invoke_rpc(to, func, RPCExecMode.ASYNC, args, kwargs, timeout)
    if hasattr(_thread_local_var, "future_list"):
        _thread_local_var.future_list.append(fut)
    return fut


def _get_should_profile():
    # Legacy profiler should be enabled. RPC profiling is not supported with
    # Kineto profiler.
    ActiveProfilerType = torch._C._profiler.ActiveProfilerType
    return (
        torch.autograd._profiler_enabled() and
        torch._C._autograd._profiler_type() == ActiveProfilerType.LEGACY  # type: ignore[attr-defined]
    )


def _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info):
    ctx_manager = contextlib.nullcontext()

    if should_profile:
        # Create appropriate string representation based on type of func
        # (builtin, script, python)
        if qualified_name is None:
            func_name = (
                torch._jit_internal._qualified_name(func)
                if isinstance(func, torch.jit.ScriptFunction)
                else func.__qualname__
            )
        else:
            func_name = qualified_name
        # Build RPC profiling key.
        rpc_profiling_key = _build_rpc_profiling_key(
            rpc_type,
            func_name,
            get_worker_info().name,
            dst_worker_info.name,
        )
        RemoteProfilerManager.set_current_profiling_key(rpc_profiling_key)
        # Mypy doesn't support re-def of a variable not in the same block (#1174)
        ctx_manager = torch.autograd.profiler.record_function(rpc_profiling_key)  # type: ignore[assignment]

    return ctx_manager
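

# A minimal caller-side sketch tying the two helpers above together: RPC
# profiling only kicks in when the legacy autograd profiler is active. The
# `use_kineto=False` argument is assumed to select the legacy profiler; treat
# that flag as an assumption rather than a guarantee, and the two-worker setup
# ("worker0"/"worker1") is likewise assumed.
#
#     with torch.autograd.profiler.profile(use_kineto=False) as prof:
#         rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))
#     print(prof.key_averages().table())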