# mypy: allow-untyped-defs
from typing import Dict, List, Optional, Union

import torch
from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase

from . import constants as rpc_constants

DeviceType = Union[int, str, torch.device]

__all__ = ["TensorPipeRpcBackendOptions"]


def _to_device(device: DeviceType) -> torch.device:
    device = torch.device(device)
    if device.type != "cuda":
        raise ValueError(
            "`set_devices` expects a list of CUDA devices, but got "
            f"device type {device.type}."
        )
    return device


def _to_device_map(
    device_map: Dict[DeviceType, DeviceType]
) -> Dict[torch.device, torch.device]:
    full_device_map: Dict[torch.device, torch.device] = {}
    reverse_map: Dict[torch.device, torch.device] = {}
    for k, v in device_map.items():
        k, v = torch.device(k), torch.device(v)
        if v in reverse_map:
            # Two source devices mapping onto the same target would make the
            # map non-invertible, which the agent cannot support.
            raise ValueError(
                "`device_map` only supports 1-to-1 mapping, "
                f"trying to map {k} and {reverse_map[v]} to {v}"
            )
        full_device_map[k] = v
        reverse_map[v] = k
    return full_device_map


def _to_device_list(devices: List[DeviceType]) -> List[torch.device]:
    return list(map(_to_device, devices))
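
# Illustrative behavior of the helpers above (a sketch; assumes a CUDA-enabled
# build, uses string device specifiers, and abbreviates the torch.device reprs):
#
#   _to_device("cuda:1")                  -> torch.device("cuda:1")
#   _to_device("cpu")                     -> raises ValueError (CUDA devices only)
#   _to_device_map({"cuda:0": "cuda:1"})  -> {torch.device("cuda:0"): torch.device("cuda:1")}
#   _to_device_map({0: 1, 1: 1})          -> raises ValueError (mapping is not 1-to-1)
#   _to_device_list(["cuda:0", "cuda:1"]) -> [torch.device("cuda:0"), torch.device("cuda:1")]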


class TensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
    r"""
    The backend options for
    :class:`~torch.distributed.rpc.TensorPipeAgent`, derived from
    :class:`~torch.distributed.rpc.RpcBackendOptions`.

    Args:
        num_worker_threads (int, optional): The number of threads in the
            thread-pool used by
            :class:`~torch.distributed.rpc.TensorPipeAgent` to execute
            requests (default: 16).
        rpc_timeout (float, optional): The default timeout, in seconds,
            for RPC requests (default: 60 seconds). If the RPC has not
            completed in this timeframe, an exception indicating so will
            be raised. Callers can override this timeout for individual
            RPCs in :meth:`~torch.distributed.rpc.rpc_sync` and
            :meth:`~torch.distributed.rpc.rpc_async` if necessary.
        init_method (str, optional): The URL to initialize the distributed
            store used for rendezvous. It takes any value accepted by the
            same argument of :meth:`~torch.distributed.init_process_group`
            (default: ``env://``).
        device_maps (Dict[str, Dict], optional): Device placement mappings from
            this worker to the callee. The key is the callee worker name and
            the value is a dictionary (``Dict`` of ``int``, ``str``, or
            ``torch.device``) that maps this worker's devices to the callee
            worker's devices. (default: ``None``)
        devices (List of ``int``, ``str``, or ``torch.device``, optional): all
            local CUDA devices used by the RPC agent. By default, it will be
            initialized to all local devices from its own ``device_maps`` and
            the corresponding devices from its peers' ``device_maps``. When
            processing CUDA RPC requests, the agent will properly synchronize
            CUDA streams for all devices in this ``List``.
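
    A minimal construction sketch (worker names, ranks, and the device indices
    below are illustrative placeholders):

    Example::
        >>> # xdoctest: +SKIP("distributed")
        >>> import torch.distributed.rpc as rpc
        >>> options = rpc.TensorPipeRpcBackendOptions(
        >>>     num_worker_threads=8,
        >>>     rpc_timeout=20,  # seconds
        >>>     device_maps={"worker1": {0: 1}}
        >>>     # maps this worker's cuda:0 to worker1's cuda:1
        >>> )
        >>> rpc.init_rpc(
        >>>     "worker0",
        >>>     rank=0,
        >>>     world_size=2,
        >>>     rpc_backend_options=options
        >>> )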
  64. """
  65. def __init__(
  66. self,
  67. *,
  68. num_worker_threads: int = rpc_contants.DEFAULT_NUM_WORKER_THREADS,
  69. rpc_timeout: float = rpc_contants.DEFAULT_RPC_TIMEOUT_SEC,
  70. init_method: str = rpc_contants.DEFAULT_INIT_METHOD,
  71. device_maps: Optional[Dict[str, Dict[DeviceType, DeviceType]]] = None,
  72. devices: Optional[List[DeviceType]] = None,
  73. _transports: Optional[List] = None,
  74. _channels: Optional[List] = None,
  75. ):
  76. full_device_maps = (
  77. {}
  78. if device_maps is None
  79. else {k: _to_device_map(v) for k, v in device_maps.items()}
  80. )
  81. full_device_list = [] if devices is None else _to_device_list(devices)
  82. super().__init__(
  83. num_worker_threads,
  84. _transports,
  85. _channels,
  86. rpc_timeout,
  87. init_method,
  88. full_device_maps,
  89. full_device_list,
  90. )

    def set_device_map(self, to: str, device_map: Dict[DeviceType, DeviceType]):
        r"""
        Set the device mapping between each RPC caller and callee pair. This
        function can be called multiple times to incrementally add
        device placement configurations.

        Args:
            to (str): Callee name.
            device_map (Dict of int, str, or torch.device): Device placement
                mappings from this worker to the callee. This map must be
                invertible.

        Example::
            >>> # xdoctest: +SKIP("distributed")
            >>> # both workers
            >>> def add(x, y):
            >>>     print(x)  # tensor([1., 1.], device='cuda:1')
            >>>     return x + y, (x + y).to(2)
            >>>
            >>> # on worker 0
            >>> options = TensorPipeRpcBackendOptions(
            >>>     num_worker_threads=8,
            >>>     device_maps={"worker1": {0: 1}}
            >>>     # maps worker0's cuda:0 to worker1's cuda:1
            >>> )
            >>> options.set_device_map("worker1", {1: 2})
            >>> # maps worker0's cuda:1 to worker1's cuda:2
            >>>
            >>> rpc.init_rpc(
            >>>     "worker0",
            >>>     rank=0,
            >>>     world_size=2,
            >>>     backend=rpc.BackendType.TENSORPIPE,
            >>>     rpc_backend_options=options
            >>> )
            >>>
            >>> x = torch.ones(2)
            >>> rets = rpc.rpc_sync("worker1", add, args=(x.to(0), 1))
            >>> # The first argument will be moved to cuda:1 on worker1. When
            >>> # sending the return value back, it will follow the inverse of
            >>> # the device map, and hence will be moved back to cuda:0 and
            >>> # cuda:1 on worker0.
            >>> print(rets[0])  # tensor([2., 2.], device='cuda:0')
            >>> print(rets[1])  # tensor([2., 2.], device='cuda:1')
        """
        full_device_map = _to_device_map(device_map)
        curr_device_maps = super().device_maps
        if to in curr_device_maps:
            for k, v in full_device_map.items():
                if k in curr_device_maps[to] and v != curr_device_maps[to][k]:
                    raise ValueError(
                        "`set_device_map` only supports 1-to-1 mapping, trying"
                        f" to map {k} to {v} and {curr_device_maps[to][k]}"
                    )
        super()._set_device_map(to, full_device_map)

    def set_devices(self, devices: List[DeviceType]):
        r"""
        Set local devices used by the TensorPipe RPC agent. When processing
        CUDA RPC requests, the TensorPipe RPC agent will properly synchronize
        CUDA streams for all devices in this ``List``.

        Args:
            devices (List of int, str, or torch.device): local devices used by
                the TensorPipe RPC agent.
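
        A minimal sketch (device indices are placeholders; assumes this worker
        has at least two local CUDA devices):

        Example::
            >>> # xdoctest: +SKIP("distributed")
            >>> options = TensorPipeRpcBackendOptions(num_worker_threads=8)
            >>> options.set_devices(["cuda:0", "cuda:1"])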
  152. """
  153. self.devices = _to_device_list(devices)