reductions.py

# mypy: allow-untyped-defs
import multiprocessing
import os
import threading
from multiprocessing.reduction import ForkingPickler
from multiprocessing.util import register_after_fork
from typing import Union

import torch
import torch.utils.hooks
from torch._namedtensor_internals import check_serializing_named_tensor

try:
    # Early load resource_sharer to prevent a partially initialized instance
    # from being inherited in a forked child process. The reduce_storage method
    # requires this module indirectly through DupFd(). The built-in mp.Queue
    # class pickles arguments in a background thread which may overlap with the
    # fork.
    import multiprocessing.resource_sharer
except ImportError:
    pass


class StorageWeakRef:
    r"""A weak reference to a Storage.

    The cdata member is a Python number containing the integer representation of
    the Storage pointer.
    """

    __slots__ = ["cdata", "_free_weak_ref"]

    def __init__(self, storage):
        self.cdata = storage._weak_ref()
        # Save a direct reference to _free_weak_ref because the `torch` module
        # might be cleared during Python shutdown before this module is cleared.
        self._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]

    @classmethod
    def from_weakref(cls, cdata):
        instance = cls.__new__(cls)
        instance.cdata = cdata
        instance._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]
        return instance

    def expired(self):
        return torch.Storage._expired(self.cdata)  # type: ignore[attr-defined]

    def __del__(self):
        self._free_weak_ref(self.cdata)

    def __hash__(self):
        return self.cdata

    def __eq__(self, other):
        if id(self) == id(other):
            return True
        return self.cdata == other.cdata


class SharedCache(dict):
    """Dictionary from multiprocessing handles to StorageWeakRef."""

    def __init__(self):
        # free_dead_references() is called if the length exceeds the current
        # limit. The limit scales with the number of remaining live objects.
        self.limit = 128
        # `fork` inherits lock state, so in case we fork when the lock is held,
        # we register a function to reset the lock to a new object to avoid
        # possible deadlocks, following the Python multiprocessing library design.
        self._after_fork()
        register_after_fork(self, SharedCache._after_fork)

    def _after_fork(self):
        self.lock = threading.Lock()

    def get(self, key):
        with self.lock:
            return dict.get(self, key)

    def __setitem__(self, key, storage_ref):
        with self.lock:
            dict.__setitem__(self, key, storage_ref)
            if len(self) > self.limit:
                self.free_dead_references()

    def free_dead_references(self):
        live = 0
        for key, storage_ref in list(self.items()):
            if storage_ref.expired():
                del self[key]
            else:
                live += 1
        self.limit = max(128, live * 2)


# mapping from handles to StorageWeakRef objects
shared_cache = SharedCache()


def rebuild_event(device, handle):
    return torch.cuda.Event.from_ipc_handle(device, handle)


def reduce_event(event):
    handle = event.ipc_handle()
    return (rebuild_event, (event.device, handle))


def rebuild_tensor(cls, storage, metadata):
    storage_offset, size, stride, requires_grad = metadata
    t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
    if cls == torch.nn.parameter.Parameter:
        # We have to pass requires_grad into the constructor, rather than set it as an
        # attribute later, because it's an important check for integer tensors to
        # have requires_grad=False (or else they raise an error)
        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
    else:
        t.requires_grad = requires_grad
    return t


def rebuild_cuda_tensor(
    tensor_cls,
    tensor_size,
    tensor_stride,
    tensor_offset,
    storage_cls,
    dtype,
    storage_device,
    storage_handle,
    storage_size_bytes,
    storage_offset_bytes,
    requires_grad,
    ref_counter_handle,
    ref_counter_offset,
    event_handle,
    event_sync_required,
):
    # If storage_handle is None, storage points to nullptr.
    if storage_handle is None or storage_size_bytes == 0:
        storage = storage_cls(0, dtype=dtype, device=storage_device, _internal=True)
    else:
        storage = storage_from_cache(
            storage_cls, (storage_handle, storage_offset_bytes)
        )
        if storage is None:
            torch.cuda._lazy_init()
            storage = storage_cls._new_shared_cuda(
                storage_device,
                storage_handle,
                storage_size_bytes,
                storage_offset_bytes,
                ref_counter_handle,
                ref_counter_offset,
                event_handle,
                event_sync_required,
            )
            shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(
                storage
            )
        else:
            # We are already ref-counting this Storage, but the producer needs the
            # new ref-counters to be released.
            storage_cls._release_ipc_counter(
                ref_counter_handle, ref_counter_offset, device=storage_device
            )

    _storage = (
        storage
        if isinstance(storage, torch.UntypedStorage)
        else storage._untyped_storage
    )

    t = torch._utils._rebuild_tensor(
        torch.storage.TypedStorage(wrap_storage=_storage, dtype=dtype, _internal=True),
        tensor_offset,
        tensor_size,
        tensor_stride,
    )

    if tensor_cls == torch.nn.parameter.Parameter:
        # It is crucial for integer tensors to receive
        # requires_grad=False as an argument in the constructor
        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
    else:
        t.requires_grad = requires_grad

    return t


def reduce_tensor(tensor):
    if tensor.requires_grad and not tensor.is_leaf:
        raise RuntimeError(
            "Cowardly refusing to serialize non-leaf tensor which requires_grad, "
            "since autograd does not support crossing process boundaries. "
            "If you just want to transfer the data, call detach() on the tensor "
            "before serializing (e.g., putting it on the queue)."
        )

    check_serializing_named_tensor(tensor)
    torch.utils.hooks.warn_if_has_hooks(tensor)

    # Note [CUDA IPC and the caching allocator]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # When you send a CUDA tensor over IPC, you might expect that you will
    # get out the same storage from the other end. However, the CUDA caching
    # allocator makes it difficult to preserve this invariant. Consider
    # the following situation: a tensor of size 0x100 points to offset 0x20 of
    # a storage at 0xA100 of size 0x100. (For simplicity, all of these
    # sizes are given in bytes). HOWEVER, with the caching allocator, this storage
    # might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
    #
    # When we want to send this CUDA tensor over IPC, we must send the
    # *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
    # the storage 0xA100 (because that is what CUDA supports). So, on the
    # other end, there simply isn't any way to say, "Wait, you gave me
    # a bigger region (0xA000) than the one I wanted (0xA100)".
    #
    # OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
    # one storage itself? No, because this cudaMalloc allocation might contain
    # storages of mixed types: float, bytes, double... If you make the entire
    # allocation a single storage of a type A, we'll hit an error when constructing
    # a tensor of type B on the storage.
    #
    # cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
    # receiver side. However, cudaIpcMemHandles from each device in a given process may
    # only be opened by one context per device per other process.
    # If we open and close a memory handle multiple times in a process, CUDA is allowed
    # to give it a different address; similarly, once we close the memory, we're not
    # allowed to access it (and the storage/tensor built on top of it), even if it is
    # still live in the original process. As we cannot make a cudaMalloc allocation
    # to a single storage in one go, this requires us to cache the device pointer for
    # each cudaIpcMemHandle on the C++ side to reconstruct the types of storages, while
    # keeping the old ones alive.
    # See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
    #
    # This is fine, because all we need to do is to save our position in the allocation,
    # and reconstruct storage and tensor from it.
    #
    # 0xA000 ->  -------CUDA Allocation------
    #            |                          |
    #            |                          |
    #            |                          |
    #            |                          |
    # 0xA100 ->  --------storage1 begin------
    #            |                          |
    # 0xA120 ->  --------tensor1 begin ------
    #            |                          |
    #            |                          |
    #            |                          |
    #            |                          |
    #            |                          |
    # 0xA160 ->  --------tensor1 end---------
    #            |                          |
    #            |                          |
    #            |                          |
    # 0xA200 ->  --------storage1 end--------
    #            |                          |
    # 0xE000 ->  --------CUDA allocation-----
    #
    # To send tensor1, the following info is required from sender to receiver for
    # storage reconstruction.
    #   1. cudaIpcMemHandle of 0xA000 (which can be mapped to a basePtr in the
    #      receiver process). basePtr may not be exactly 0xA000 since it's a
    #      different process.
    #   2. offset (0xA100) of storage1 in the CUDA allocation.
    #   3. size of storage1 (0x100).
    #
    # On the receiver side:
    #   1. Get the devPtr of the MemHandle to access the memory, reconstruct a storage
    #      of the same type using (basePtr, offset, size).
    #   2. We can reconstruct the tensor on top of the reconstructed storage:
    #      Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
    #
    # This strategy has a few implications:
    #
    # 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
    #    go (non-compositionally), and this requires us to have a global map
    #    memHandle -> devPtr for each process.
    #
    # 2. We MUST NOT let the new IPC tensor be resizable. Originally, a resize
    #    of the storage beyond 0x100 would merely have caused us to do a
    #    reallocation. You don't really want to do this, but if you did,
    #    all that would happen is that you would lose IPC sharing. But if
    #    you do this in the new world, we will happily let you write out of
    #    bounds of your "allocation", clobbering unrelated data in the cached
    #    allocator block. BAD!
    #
    # By the way, in old versions of PyTorch, we supported this situation
    # natively using a "storage view", which permitted multiple storages to be
    # views on each other. But this was the *only* use of storage views, so we
    # eliminated it so that we could just use tensor views to implement the same
    # thing.

    # TODO: Handle distinguishing between subclass and non-subclass versions of NT better
    # https://github.com/pytorch/pytorch/issues/110543
    from torch.nested._internal.nested_tensor import NestedTensor

    if tensor.is_nested and not isinstance(tensor, NestedTensor):
        return reduce_nested_tensor(tensor)

    if tensor.layout in {
        torch.sparse_coo,
        torch.sparse_csr,
        torch.sparse_bsr,
        torch.sparse_csc,
        torch.sparse_bsc,
    }:
        return reduce_sparse_tensor(tensor)

    storage = tensor._typed_storage()

    if storage._untyped_storage.device.type == "cuda":
        (
            device,
            handle,
            storage_size_bytes,
            storage_offset_bytes,
            ref_counter_handle,
            ref_counter_offset,
            event_handle,
            event_sync_required,
        ) = storage._share_cuda_()
        tensor_offset = tensor.storage_offset()
        shared_cache[handle] = StorageWeakRef(storage)
        # _backward_hooks purposely omitted here, see
        # Note [Don't serialize hooks]
        return (
            rebuild_cuda_tensor,
            (
                type(tensor),
                tensor.size(),
                tensor.stride(),
                tensor_offset,  # tensor offset in its storage
                type(storage),
                tensor.dtype,
                device,
                handle,  # identifier of which CUDA allocation the storage is in
                storage_size_bytes,  # size (in bytes) of the storage
                storage_offset_bytes,  # offset (in bytes) of the storage in the CUDA allocation
                tensor.requires_grad,
                ref_counter_handle,
                ref_counter_offset,
                event_handle,
                event_sync_required,
            ),
        )

    # _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
    metadata = (
        tensor.storage_offset(),
        tensor.size(),
        tensor.stride(),
        tensor.requires_grad,
    )
    return (rebuild_tensor, (type(tensor), storage, metadata))
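
# Illustrative sketch (not executed here; assumes a plain CPU tensor). reduce_tensor
# follows the standard __reduce__ protocol: it returns a (rebuild_fn, args) pair that
# ForkingPickler serializes and the receiving process calls to rebuild the tensor:
#
#   fn, args = reduce_tensor(torch.arange(6).reshape(2, 3))
#   # fn   -> rebuild_tensor
#   # args -> (torch.Tensor, <TypedStorage>, (storage_offset, size, stride, requires_grad))
#   t2 = fn(*args)  # a new view over the same storage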


def rebuild_nested_tensor(
    rebuild_buffer_func,
    rebuild_buffer_args,
    rebuild_sizes_func,
    rebuild_sizes_args,
    rebuild_strides_func,
    rebuild_strides_args,
    rebuild_offsets_func,
    rebuild_offsets_args,
):
    buffer = rebuild_buffer_func(*rebuild_buffer_args)
    sizes = rebuild_sizes_func(*rebuild_sizes_args)
    strides = rebuild_strides_func(*rebuild_strides_args)
    offsets = rebuild_offsets_func(*rebuild_offsets_args)
    return torch._nested_view_from_buffer_copy(buffer, sizes, strides, offsets)


def reduce_nested_tensor(nt):
    rebuild_buffer_func, rebuild_buffer_args = reduce_tensor(nt.values())
    rebuild_sizes_func, rebuild_sizes_args = reduce_tensor(nt._nested_tensor_size())
    rebuild_strides_func, rebuild_strides_args = reduce_tensor(
        nt._nested_tensor_strides()
    )
    rebuild_offsets_func, rebuild_offsets_args = reduce_tensor(
        nt._nested_tensor_storage_offsets()
    )

    return (
        rebuild_nested_tensor,
        (
            rebuild_buffer_func,
            rebuild_buffer_args,
            rebuild_sizes_func,
            rebuild_sizes_args,
            rebuild_strides_func,
            rebuild_strides_args,
            rebuild_offsets_func,
            rebuild_offsets_args,
        ),
    )


def rebuild_sparse_coo_tensor(
    rebuild_indices_func,
    rebuild_indices_args,
    rebuild_values_func,
    rebuild_values_args,
    shape,
    is_coalesced,
):
    indices = rebuild_indices_func(*rebuild_indices_args)
    values = rebuild_values_func(*rebuild_values_args)
    return torch.sparse_coo_tensor(indices, values, shape, is_coalesced=is_coalesced)


def rebuild_sparse_compressed_tensor(
    rebuild_compressed_indices_func,
    rebuild_compressed_indices_args,
    rebuild_plain_indices_func,
    rebuild_plain_indices_args,
    rebuild_values_func,
    rebuild_values_args,
    shape,
    layout,
):
    compressed_indices = rebuild_compressed_indices_func(
        *rebuild_compressed_indices_args
    )
    plain_indices = rebuild_plain_indices_func(*rebuild_plain_indices_args)
    values = rebuild_values_func(*rebuild_values_args)
    return torch.sparse_compressed_tensor(
        compressed_indices, plain_indices, values, shape, layout=layout
    )


def reduce_sparse_tensor(sparse):
    if sparse.layout is torch.sparse_coo:
        rebuild_indices_func, rebuild_indices_args = reduce_tensor(sparse._indices())
        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse._values())
        return (
            rebuild_sparse_coo_tensor,
            (
                rebuild_indices_func,
                rebuild_indices_args,
                rebuild_values_func,
                rebuild_values_args,
                sparse.shape,
                sparse.is_coalesced(),
            ),
        )
    else:
        if sparse.layout in {torch.sparse_csr, torch.sparse_bsr}:
            compressed_indices = sparse.crow_indices()
            plain_indices = sparse.col_indices()
        elif sparse.layout in {torch.sparse_csc, torch.sparse_bsc}:
            compressed_indices = sparse.ccol_indices()
            plain_indices = sparse.row_indices()
        else:
            raise NotImplementedError(sparse.layout)
        (
            rebuild_compressed_indices_func,
            rebuild_compressed_indices_args,
        ) = reduce_tensor(compressed_indices)
        rebuild_plain_indices_func, rebuild_plain_indices_args = reduce_tensor(
            plain_indices
        )
        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse.values())
        return (
            rebuild_sparse_compressed_tensor,
            (
                rebuild_compressed_indices_func,
                rebuild_compressed_indices_args,
                rebuild_plain_indices_func,
                rebuild_plain_indices_args,
                rebuild_values_func,
                rebuild_values_args,
                sparse.shape,
                sparse.layout,
            ),
        )


def fd_id(fd):
    # Returns a tuple which uniquely identifies a file descriptor. On macOS,
    # this doesn't work with shared memory handles, which is why we don't
    # support the "file_descriptor" sharing method on that platform.
    stat = os.fstat(fd)
    return (stat.st_ino, stat.st_dev)
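
# Quick illustration (assumption: POSIX semantics; not executed by this module): two
# descriptors that refer to the same open file map to the same cache key, which is
# what lets rebuild_storage_fd below deduplicate storages received more than once:
#
#   fd2 = os.dup(fd)
#   assert fd_id(fd) == fd_id(fd2)  # same (st_ino, st_dev)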


def storage_from_cache(cls, key):
    storage_ref = shared_cache.get(key)
    if storage_ref is None:
        return None
    return torch.UntypedStorage._new_with_weak_ptr(storage_ref.cdata)


def rebuild_storage_fd(cls, df, size):
    fd = df.detach()
    try:
        storage = storage_from_cache(cls, fd_id(fd))
        if storage is not None:
            return storage
        storage = cls._new_shared_fd_cpu(fd, size)
        shared_cache[fd_id(fd)] = StorageWeakRef(storage)
        return storage
    finally:
        os.close(fd)


def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
    storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(
        cls, handle
    )
    if storage is not None:
        return storage._shared_decref()
    if dtype is None:
        storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
    else:
        byte_size = size * torch._utils._element_size(dtype)
        untyped_storage: torch.UntypedStorage = (
            torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
        )
        storage = torch.TypedStorage(
            wrap_storage=untyped_storage, dtype=dtype, _internal=True
        )
    shared_cache[handle] = StorageWeakRef(storage)
    return storage._shared_decref()


def rebuild_storage_empty(cls):
    return cls()


def rebuild_typed_storage(storage, dtype):
    return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype, _internal=True)


# Used for torch.storage.TypedStorage
def reduce_typed_storage(storage):
    return (rebuild_typed_storage, (storage._untyped_storage, storage.dtype))


def rebuild_typed_storage_child(storage, storage_type):
    return storage_type(wrap_storage=storage, _internal=True)


# Used for child classes of torch.storage.TypedStorage, like torch.FloatStorage
def reduce_typed_storage_child(storage):
    return (rebuild_typed_storage_child, (storage._untyped_storage, type(storage)))


def reduce_storage(storage):
    from . import get_sharing_strategy

    if storage.is_cuda:
        raise RuntimeError(
            "Cannot pickle CUDA storage; try pickling a CUDA tensor instead"
        )
    elif get_sharing_strategy() == "file_system":
        metadata = storage._share_filename_cpu_()
        cache_key = metadata[1]
        rebuild = rebuild_storage_filename
        if isinstance(storage, torch.TypedStorage):
            metadata += (storage.dtype,)
        storage._shared_incref()
    elif storage.size() == 0:
        # This is special-cased because empty storages
        # (with size 0) cannot be mmapped.
        return (rebuild_storage_empty, (type(storage),))
    else:
        fd, size = storage._share_fd_cpu_()
        df = multiprocessing.reduction.DupFd(fd)
        cache_key = fd_id(fd)
        metadata = (df, size)
        rebuild = rebuild_storage_fd  # type: ignore[assignment]

    shared_cache[cache_key] = StorageWeakRef(storage)
    return (rebuild, (type(storage),) + metadata)
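
# Illustration (assumption: Linux; uses the public torch.multiprocessing API rather
# than this module directly). The branch taken in reduce_storage above is controlled
# by the process-wide sharing strategy:
#
#   import torch.multiprocessing as mp
#   mp.get_all_sharing_strategies()             # {'file_descriptor', 'file_system'} on Linux
#   mp.set_sharing_strategy("file_system")      # -> rebuild_storage_filename path
#   mp.set_sharing_strategy("file_descriptor")  # Linux default -> DupFd + rebuild_storage_fd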


def init_reductions():
    ForkingPickler.register(torch.cuda.Event, reduce_event)

    for t in torch._storage_classes:
        if t.__name__ == "UntypedStorage":
            ForkingPickler.register(t, reduce_storage)
        else:
            ForkingPickler.register(t, reduce_typed_storage_child)

    ForkingPickler.register(torch.storage.TypedStorage, reduce_typed_storage)

    for t in torch._tensor_classes:
        ForkingPickler.register(t, reduce_tensor)

    # TODO: Maybe this should be in tensor_classes? :)
    ForkingPickler.register(torch.Tensor, reduce_tensor)
    ForkingPickler.register(torch.nn.parameter.Parameter, reduce_tensor)
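
# A minimal end-to-end sketch (assumption: Linux with the default "file_descriptor"
# strategy; this code is not part of the module). torch.multiprocessing calls
# init_reductions() on import, so putting a tensor on one of its queues routes it
# through reduce_tensor / reduce_storage above, and the child process rebuilds a
# tensor whose storage is shared with the parent:
#
#   import torch
#   import torch.multiprocessing as mp
#
#   def worker(q):
#       t = q.get()   # rebuilt via rebuild_storage_fd + rebuild_tensor
#       t.add_(1)     # visible in the parent: the storage is shared
#
#   if __name__ == "__main__":
#       t = torch.zeros(4)
#       t.share_memory_()
#       q = mp.Queue()
#       p = mp.Process(target=worker, args=(q,))
#       p.start()
#       q.put(t)
#       p.join()
#       print(t)      # tensor([1., 1., 1., 1.])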