# _utils.py

# mypy: allow-untyped-defs
import copyreg
import functools
import logging
import sys
import threading
import traceback
import warnings
from collections import defaultdict
from typing import Any, Callable, DefaultDict, Generic, List, Optional

from typing_extensions import ParamSpec

import torch


def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    if dtype is None:
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self

    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)


def _to(self, device, non_blocking=False):
    """Returns a copy of this object in device memory.

    If this object is already on the correct device, then no copy is performed
    and the original object is returned.

    Args:
        device (torch.device): The destination device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
    """
    if self.device == device:
        return self

    device_module = getattr(torch, device.type, None)
    assert (
        device_module is not None
    ), f"{device.type.upper()} device module is not loaded"
    with device_module.device(device):
        if self.is_sparse and hasattr(device_module, "sparse"):
            new_type = getattr(device_module.sparse, self.__class__.__name__)
            indices = getattr(torch.Tensor._indices(self), device.type)(
                device, non_blocking
            )
            values = getattr(torch.Tensor._values(self), device.type)(
                device, non_blocking
            )
            return new_type(indices, values, self.size())
        else:
            assert (
                not self.is_sparse
            ), f"sparse storage is not supported for {device.type.upper()} tensors"
            untyped_storage = torch.UntypedStorage(self.size(), device=device)
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage


def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    """Return the non-blocking flag given the function name and kwargs.

    Args:
        function_name (str): the name of the function being used.
        non_blocking (bool): the default value.
        **kwargs (dict): the kwargs passed to the function.
    """
    if not kwargs:
        return non_blocking
    if len(kwargs) != 1 or "async" not in kwargs:
        message = "{}() got an unexpected keyword argument '{}'"
        argument = list(kwargs.keys()).pop()
        raise TypeError(message.format(function_name, argument))
    warnings.warn("'async' is deprecated; use 'non_blocking'")
    return kwargs["async"]


_thread_local_state = threading.local()


def _get_restore_location(device):
    """Return the map_location location.

    Used for rebuild functions where the tensor device is distinct from the storage
    """
    map_location = getattr(_thread_local_state, "map_location", None)
    if map_location is None:
        return device
    else:
        if isinstance(map_location, dict):
            return map_location.get(device, device)
        elif isinstance(map_location, (str, torch.device)):
            return map_location
        else:
            assert callable(map_location)
            raise RuntimeError(
                "Callable map_location not supported with _rebuild_wrapper_subclass "
                "or _rebuild_device_tensor_from_numpy"
            )


# Note [Don't serialize hooks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Since time immemorial, we have serialized the backward hooks associated with
# variables. This kind of half-worked--Python can pickle global functions
# (but not closures!)--but there were problems.
#
#   - It's fragile. If you serialize a backward hook into a saved
#     model, and then you rename the function associated with the hook,
#     now your saved model is broken and you can't load it anymore.
#
#   - It's not actually used. The standard recommendation is to
#     serialize the *state_dict* of a model, not the model itself
#     (since this is more stable to code changes affecting the model
#     serialization), and the state dict saves "data" only, thus
#     stripping the backward hooks. In some cases, hooks are
#     essential to the well-functioning of a model (e.g., DDP),
#     but DDP already manages re-adding the hooks!
#
#   - We didn't serialize them in many cases. Prior to #10220, we
#     were dropping backward hooks in ForkingPickler. We "fixed" this
#     to be consistent with other serialization sites, but the lack of
#     serializing backward hooks wasn't actually the root cause of
#     the bug.
#
# With these cases in mind, we have decided that a better strategy
# is to just NOT serialize hooks at all.
#
# Since this is a BC-breaking change, we should warn when we previously
# serialized a hook, but no longer do so. This will be done by adding a special
# sentinel property to hooks which will be used to suppress this warning. If a
# hook has the property _torch_serialize_ignore, we will not emit a warning if
# we attempt to serialize a Tensor with this hook attached to it.
#
# By the way, when _backward_hooks is skipped, we must give an EMPTY
# OrderedDict(); if you pass None you'll run afoul of #12219.


# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.empty((0,), dtype=storage.dtype, device=storage._untyped_storage.device)
    return t.set_(storage._untyped_storage, storage_offset, size, stride)
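

# A minimal sketch (not part of the original API surface) of what `_rebuild_tensor`
# does: given a typed storage plus an offset, size, and stride, it produces a tensor
# viewing that storage. The concrete values below are arbitrary and assume the
# internal `Tensor._typed_storage()` accessor; the offset is counted in elements.
def _example_rebuild_tensor_sketch():
    base = torch.arange(6.0)  # underlying storage holds [0., 1., 2., 3., 4., 5.]
    # View elements 1..4 of the storage as a contiguous 2x2 tensor.
    rebuilt = _rebuild_tensor(base._typed_storage(), 1, (2, 2), (2, 1))
    return rebuilt  # expected: tensor([[1., 2.], [3., 4.]])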


def get_tensor_metadata(tensor):
    # Tensor's Metadata for serializing.
    # Currently, this only returns a dict[string, bool] specifying whether the
    # `conj` or `neg` bit is set.
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)  # type: ignore[attr-defined]


def set_tensor_metadata(tensor, metadata):
    # See `get_tensor_metadata` above
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)  # type: ignore[attr-defined]


def _rebuild_tensor_v2(
    storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata=None
):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(tensor, metadata)

    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_tensor_v3(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    dtype,
    metadata=None,
):
    t = torch.empty(
        (0,),
        dtype=dtype,
        device=storage._untyped_storage.device,
        requires_grad=requires_grad,
    )
    t.set_(storage._untyped_storage, storage_offset, size, stride)
    if metadata:
        set_tensor_metadata(t, metadata)
    t._backward_hooks = backward_hooks
    return t


_sparse_tensors_to_validate: List["torch.Tensor"] = []


# In _legacy_load() in serialization.py we unpickle storages after the sparse
# tensors have already been unpickled. Those storages contain data necessary for
# validating sparse tensors: indices and values. That's why sparse tensors are
# first unpickled without any validation, and then this function is called just
# before _legacy_load() returns, so that all the sparse tensors can be validated
# in bulk.
#
# The same procedure must be followed by _load() in serialization.py because, due
# to Pickler semantics, we have to use the same (non-validating) function for
# unpickling sparse tensors, regardless of the caller.
def _validate_loaded_sparse_tensors():
    try:
        for t in _sparse_tensors_to_validate:
            if t.layout is torch.sparse_coo:
                torch._validate_sparse_coo_tensor_args(
                    t._indices(), t._values(), t.size(), t.is_coalesced()
                )
            elif t.layout in {
                torch.sparse_csr,
                torch.sparse_csc,
                torch.sparse_bsr,
                torch.sparse_bsc,
            }:
                # TODO: Validation currently involves an expensive traversal
                # on CPU, which may include a device transfer.
                if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
                    compressed_indices, plain_indices = (
                        t.crow_indices(),
                        t.col_indices(),
                    )
                else:
                    compressed_indices, plain_indices = (
                        t.ccol_indices(),
                        t.row_indices(),
                    )
                torch._validate_sparse_compressed_tensor_args(
                    compressed_indices, plain_indices, t.values(), t.size(), t.layout
                )
            else:
                raise NotImplementedError(
                    f"_validate_loaded_sparse_tensors for layout `{t.layout}`"
                )
    finally:
        _sparse_tensors_to_validate.clear()


def _rebuild_sparse_tensor(layout, data):
    """
    Rebuilds a sparse tensor from its sparse storage representation.

    Args:
        layout (str): The sparse storage layout of the tensor.
        data (tuple): The tensor's sparse storage representation.
    """
    if layout == torch.sparse_coo:
        if len(data) == 3:
            # For BC:
            indices, values, size = data
            is_coalesced = None
        else:
            indices, values, size, is_coalesced = data
        result = torch.sparse_coo_tensor(
            indices, values, size, check_invariants=False, is_coalesced=is_coalesced
        )
        _sparse_tensors_to_validate.append(result)
        return result

    elif layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        compressed_indices, plain_indices, values, size = data
        result = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout=layout,
            check_invariants=False,
        )
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}")


def _rebuild_nested_tensor(buffer, sizes, strides, storage_offsets):
    return torch._nested_view_from_buffer(buffer, sizes, strides, storage_offsets)


def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


# Should not be used, only here to be able to load Tensors serialized with older versions of pytorch
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy


def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
    return torch.empty_strided(
        size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
    )


def _rebuild_wrapper_subclass(
    cls, dtype, size, stride, storage_offset, layout, device, requires_grad
):
    device = _get_restore_location(device)
    return torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
        cls,
        size,
        strides=stride,
        dtype=dtype,
        storage_offset=storage_offset,
        layout=layout,
        device=device,
        requires_grad=requires_grad,
    )


# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(f"Can't deserialize quantized tensor with qscheme {qscheme}")
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_parameter(data, requires_grad, backward_hooks):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    return param


def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    # Restore state on Parameter like python attr.
    param = _set_obj_state(param, state)
    return param


def _get_obj_state(obj):
    # Get the state of the python subclass
    # This loosely mimics the function on the object class but since Tensor does
    # not inherit from it, we cannot call that function directly
    # https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
    # Note that starting with Python 3.11, this `__getstate__` is always defined and thus
    # the else branch will never be taken.
    getstate_fn = getattr(obj, "__getstate__", None)
    if getstate_fn:
        state = getstate_fn()
    else:
        slots_to_save = copyreg._slotnames(obj.__class__)  # type: ignore[attr-defined]
        if slots_to_save:
            state = (
                obj.__dict__,
                {
                    name: getattr(obj, name)
                    for name in slots_to_save
                    if hasattr(obj, name)
                },
            )
        else:
            state = obj.__dict__

    return state


def _set_obj_state(obj, state):
    if isinstance(state, tuple):
        if not len(state) == 2:
            raise RuntimeError(f"Invalid serialized state: {state}")

        dict_state = state[0]
        slots_state = state[1]
    else:
        dict_state = state
        slots_state = None

    # Starting with Python 3.11, the __dict__ attribute is lazily created
    # and is serialized as None when not needed.
    if dict_state:
        for k, v in dict_state.items():
            setattr(obj, k, v)

    if slots_state:
        for k, v in slots_state.items():
            setattr(obj, k, v)

    return obj


def _import_dotted_name(name):
    components = name.split(".")
    obj = __import__(components[0])
    for component in components[1:]:
        obj = getattr(obj, component)
    return obj
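

# A small illustrative sketch of `_import_dotted_name`: it resolves a dotted path to
# the corresponding module, class, or function object. The path below is arbitrary.
def _example_import_dotted_name():
    functional = _import_dotted_name("torch.nn.functional")
    return functional is torch.nn.functional  # expected: True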


def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    return torch._C._nn.flatten_dense_tensors(tensors)


def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    one of values. Assume tensors are of same sparse type.

    Args:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        A tuple of two contiguous 1D buffers, one containing input tensors'
        indices and the other containing the values.
    """
    flat_indices = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._indices(t) for t in tensors]
    )
    flat_values = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._values(t) for t in tensors]
    )
    return flat_indices, flat_values


def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)
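

# A minimal sketch of the dense flatten/unflatten round trip: the flat buffer is a
# single 1D concatenation, and unflattening restores tensors with the original
# shapes. The tensor sizes below are arbitrary.
def _example_flatten_unflatten_dense():
    a = torch.ones(2, 3)
    b = torch.zeros(4)
    flat = _flatten_dense_tensors([a, b])  # 1D buffer of 10 elements
    a2, b2 = _unflatten_dense_tensors(flat, [a, b])
    return a2.shape, b2.shape  # expected: torch.Size([2, 3]), torch.Size([4])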


def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
            tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors]
    )
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors]
    )
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)


def _reorder_tensors_as(tensors, ordered_tensors):
    """Assume that tensors are of same order as ordered_tensors within their
    types, e.g., from _take_tensors. Reorder them to be of same order as
    ordered_tensors.

    Args:
        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
            the same order as ordered_tensors within their own types.
        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
            reference.

    Returns:
        Ordered tuple of tensors with contents from tensors and order of
        ordered_tensors.
    """
    type_dict = defaultdict(list)
    for tensor in tensors:
        type_dict[tensor.type()].append(tensor)
    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
    return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)


def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields one chunk at a time,
    each containing tensors of the same type up to a certain byte limit in total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = (
                indices.numel() * indices.element_size()
                + values.numel() * values.element_size()
            )
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    for buf, _ in buf_dict.values():
        if len(buf) > 0:
            yield buf
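

# A minimal sketch of `_take_tensors` grouping: three float32 CPU tensors with a
# 2 KB chunk limit; the two 1 KB tensors share a chunk while the 4 KB tensor gets
# its own. The sizes are arbitrary.
def _example_take_tensors():
    tensors = [torch.zeros(256), torch.zeros(256), torch.zeros(1024)]
    chunks = list(_take_tensors(tensors, size_limit=2048))
    return [len(chunk) for chunk in chunks]  # expected: [2, 1]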


# annotation decorator to get annotations in a way that is compatible
# with both Python 2 and 3
def annotate(ret, **kwargs):
    def dec(fun):
        fun.__annotations__ = dict(kwargs)
        fun.__annotations__["return"] = ret
        return fun

    return dec
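

# A minimal sketch of the `annotate` decorator: it attaches type annotations to a
# function without inline annotation syntax. The function below is only illustrative.
@annotate(float, x=int, y=int)
def _example_annotated_ratio(x, y):
    return x / y
# _example_annotated_ratio.__annotations__ == {"x": int, "y": int, "return": float}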


def render_call(fn, args, kwargs):
    str_fn = torch.overrides.resolve_name(fn)
    if str_fn is None:
        str_fn = str(fn)

    str_args: List[str] = []
    with torch._tensor_str.printoptions(threshold=0, edgeitems=0):
        str_args.extend(repr(a) for a in args)
        str_args.extend(f"{k}={repr(v)}" for k, v in kwargs.items())
        r = f"{str_fn}({', '.join(str_args)})"
    return r


# NOTE [ Python Traceback Reference Cycle Problem ]
#
# When using sys.exc_info(), it is important to **not** store the exc_info[2],
# which is the traceback, because otherwise you will run into the traceback
# reference cycle problem, i.e., the traceback holding a reference to the frame,
# and the frame (which holds references to all the objects in its temporary scope)
# holding a reference to the traceback.


class KeyErrorMessage(str):
    r"""str subclass that returns itself in repr"""

    def __repr__(self):
        return self


class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Format a message such as: "Caught ValueError in DataLoader worker
        # process 2. Original Traceback:", followed by the traceback.
        msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key). This
            # makes stack traces unreadable. It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have first argument as non-str but explicitly
            # have message field
            raise self.exc_type(message=msg)
        try:
            exception = self.exc_type(msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to
            raise RuntimeError(msg) from None
        raise exception
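

# A minimal sketch of how ExceptionWrapper is typically used: capture an exception
# raised in a worker thread and re-raise it (with the original traceback text) in
# the calling thread. The worker function and queue below are only illustrative.
def _example_exception_wrapper_across_threads():
    import queue

    results = queue.Queue()

    def worker():
        try:
            raise ValueError("boom")
        except Exception:
            # sys.exc_info() is picked up inside ExceptionWrapper.__init__
            results.put(ExceptionWrapper(where="in example worker thread"))

    thread = threading.Thread(target=worker)
    thread.start()
    thread.join()

    out = results.get()
    if isinstance(out, ExceptionWrapper):
        out.reraise()  # raises ValueError with the formatted worker traceback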


def _get_available_device_type():
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():  # type: ignore[attr-defined]
        return "xpu"
    if hasattr(torch, "mtia") and torch.mtia.is_available():
        return "mtia"
    custom_backend_name = torch._C._get_privateuse1_backend_name()
    custom_device_mod = getattr(torch, custom_backend_name, None)
    if custom_device_mod and custom_device_mod.is_available():
        return custom_backend_name
    # add more available device types here
    return None


def _get_device_attr(get_member):
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    if device_type and device_type.lower() == "xpu":
        return get_member(torch.xpu)  # type: ignore[attr-defined]
    if device_type and device_type.lower() == "mtia":
        return get_member(torch.mtia)
    if device_type == torch._C._get_privateuse1_backend_name():
        return get_member(getattr(torch, device_type))
    # add more available device types here
    return None


def _get_current_device_index():
    # current device index
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device indices
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # all device properties
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]


def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    if torch.cuda.device_count() > 0:
        return torch.cuda.current_device()
    return -1


def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has an index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``,
    i.e., the current default CUDA device will be returned if CUDA runtime is supported.
    """
    if isinstance(device, str):
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError(f"Expected a non cpu device, but got: {device}")
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # The eager API _get_current_device_index uses `lambda` functions which are
            # not supported in JIT and hence not scriptable. The JIT equivalent API to get
            # the current device index is `get_current_device_index()` which can
            # be scripted. We use is_scripting to check the mode we are in and call the
            # appropriate API.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                f"Expected a torch.device with a specified index or an integer, but got: {device}"
            )
    return device_idx
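

# A small sketch of `_get_device_index` for a few representative inputs; none of
# these cases require an accelerator to actually be present.
def _example_get_device_index():
    assert _get_device_index(torch.device("cuda", 1)) == 1  # explicit index
    assert _get_device_index(3) == 3  # integers pass through unchanged
    assert _get_device_index(torch.device("cpu"), allow_cpu=True) == -1  # CPU maps to -1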


def _handle_complex(tensor):
    """
    Returns a real view of a tensor if it has a complex dtype, else the tensor itself.
    We need to check for UninitializedParameter first because calling is_complex on
    one (e.g. from a LazyModule) is an error.
    """
    return (
        torch.view_as_real(tensor)
        if not isinstance(tensor, torch.nn.UninitializedParameter)
        and tensor.is_complex()
        else tensor
    )
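

# A minimal sketch of `_handle_complex`: a complex tensor gains a trailing dimension
# of size 2 (real and imaginary parts), while a real tensor is returned unchanged.
def _example_handle_complex():
    c = torch.zeros(3, dtype=torch.complex64)
    r = torch.zeros(3)
    # expected shapes: torch.Size([3, 2]) and torch.Size([3])
    return _handle_complex(c).shape, _handle_complex(r).shape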


def _element_size(dtype):
    """
    Returns the element size for a dtype, in bytes
    """
    if not isinstance(dtype, torch.dtype):
        raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")

    if dtype.is_complex:
        return torch.finfo(dtype).bits >> 2
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits >> 3
    elif dtype == torch.bool:
        # NOTE: torch.bool is not supported in torch.iinfo()
        return 1
    else:
        return torch.iinfo(dtype).bits >> 3
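

# A few worked values for `_element_size`. For complex dtypes, finfo reports the bits
# of the underlying real dtype, so shifting by 2 (rather than 3) doubles the size.
def _example_element_sizes():
    return (
        _element_size(torch.float16),    # 16 bits -> 2 bytes
        _element_size(torch.int64),      # 64 bits -> 8 bytes
        _element_size(torch.complex64),  # 2 x 32-bit floats -> 8 bytes
        _element_size(torch.bool),       # special-cased -> 1 byte
    )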


class _ClassPropertyDescriptor:
    def __init__(self, fget, fset=None):
        self.fget = fget

    def __get__(self, instance, owner=None):
        if owner is None:
            owner = type(instance)
        return self.fget.__get__(instance, owner)()


def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return _ClassPropertyDescriptor(func)
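

# A minimal sketch of `classproperty` usage: the decorated method is read like an
# attribute on the class itself, no instance required. The class below is only an
# illustration.
class _ExampleWithClassProperty:
    _registry_name = "example"

    @classproperty
    def registry_name(cls):
        return cls._registry_name
# _ExampleWithClassProperty.registry_name == "example"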


def is_compiling() -> bool:
    """
    Indicates whether we are tracing/compiling with torch.compile() or torch.export().

    TODO(khabinov): we should deprecate this function and use torch.compiler.is_compiling().
    """
    return torch.compiler.is_compiling()


def _functionalize_sync(t):
    # This code lives in python instead of C++ since conditioning on a certain python subclass
    # is much more of a pain in C++.
    from torch._subclasses.functional_tensor import FunctionalTensor

    if isinstance(t, FunctionalTensor):
        # If a FunctionalTensorMode is active while syncing, we don't want it to intercept any ops that get called
        # when we sync our inner tensor.
        # Why?
        # (1) If there are input mutations in the graph, then they will be re-applied during
        #     AOTAutograd when we call _sync() from inside of our functionalization kernels.
        # (2) _sync() causes us to regenerate the updated tensor from the updated base,
        #     which dispatches to a bunch of view ops
        # (3) The input to these view ops is our inner FunctionalTensorWrapper
        #     (since the sync was called from C++), not the python FunctionalTensor
        # (4) if a python FunctionalTensorMode is active, it will complain when it intercepts
        #     the view op, since it will see an input that is a C++ FunctionalTensorWrapper
        #     (aka a normal torch.Tensor) instead of a python `FunctionalTensor`.
        maybe_functional_mode = torch._C._unset_dispatch_mode(
            torch._C._TorchDispatchModeKey.FUNCTIONAL
        )
        try:
            torch._functionalize_sync(t.elem)  # type: ignore[attr-defined]
        finally:
            if maybe_functional_mode is not None:
                torch._C._set_dispatch_mode(maybe_functional_mode)
    else:
        torch._functionalize_sync(t)  # type: ignore[attr-defined]


@functools.lru_cache(2)
def _get_device_module(device_type: str):
    device_module = getattr(torch, device_type, None)
    if device_module is None:
        raise RuntimeError(
            f"Device '{device_type}' does not have a corresponding module registered as 'torch.{device_type}'."
        )
    return device_module


def _dummy_type(name: str) -> type:
    def get_err_fn(is_init: bool):
        def err_fn(obj, *args, **kwargs):
            if is_init:
                class_name = obj.__class__.__name__
            else:
                class_name = obj.__name__
            raise RuntimeError(f"Tried to instantiate dummy base class {class_name}")

        return err_fn

    return type(
        name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)}
    )


class _LazySeedTracker:
    # Since seeding is memory-less, only track the latest seed.
    # Note: `manual_seed_all` followed by `manual_seed` overwrites
    # the seed on the current device. We track the order of **latest**
    # calls between these two APIs.
    def __init__(self):
        self.manual_seed_all_cb = None
        self.manual_seed_cb = None
        self.call_order = []

    def queue_seed_all(self, cb, traceback):
        self.manual_seed_all_cb = (cb, traceback)
        # update seed_all to be latest
        self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]

    def queue_seed(self, cb, traceback):
        self.manual_seed_cb = (cb, traceback)
        # update seed to be latest
        self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]

    def get_calls(self) -> List:
        return self.call_order


logger = logging.getLogger(__name__)
P = ParamSpec("P")


class CallbackRegistry(Generic[P]):
    def __init__(self, name: str):
        self.name = name
        self.callback_list: List[Callable[P, None]] = []

    def add_callback(self, cb: Callable[P, None]) -> None:
        self.callback_list.append(cb)

    def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None:
        for cb in self.callback_list:
            try:
                cb(*args, **kwargs)
            except Exception as e:
                logger.exception(
                    "Exception in callback for %s registered with gpu trace", self.name
                )
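

# A minimal sketch of CallbackRegistry usage: callbacks are registered once and then
# fired with a shared set of arguments; exceptions raised by a callback are logged
# rather than propagated. The registry name and callback below are only illustrative.
def _example_callback_registry():
    registry = CallbackRegistry("example events")
    seen: List[int] = []

    registry.add_callback(seen.append)
    registry.fire_callbacks(1)
    registry.fire_callbacks(2)
    return seen  # expected: [1, 2]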


# IMPORT_MAPPING and NAME_MAPPING are adapted from https://github.com/python/cpython/blob/main/Lib/_compat_pickle.py
# for use in the weights_only Unpickler.
IMPORT_MAPPING = {
    "__builtin__": "builtins",
    "copy_reg": "copyreg",
    "Queue": "queue",
    "repr": "reprlib",
    "_abcoll": "collections.abc",
    # Non-mutual mappings.
    "UserDict": "collections",
    "UserList": "collections",
    "UserString": "collections",
    "whichdb": "dbm",
    "StringIO": "io",
    "cStringIO": "io",
}


# This contains rename rules that are easy to handle. We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
    ("__builtin__", "xrange"): ("builtins", "range"),
    ("__builtin__", "reduce"): ("functools", "reduce"),
    ("__builtin__", "intern"): ("sys", "intern"),
    ("__builtin__", "unichr"): ("builtins", "chr"),
    ("__builtin__", "unicode"): ("builtins", "str"),
    ("__builtin__", "long"): ("builtins", "int"),
    ("itertools", "izip"): ("builtins", "zip"),
    ("itertools", "imap"): ("builtins", "map"),
    ("itertools", "ifilter"): ("builtins", "filter"),
    ("itertools", "ifilterfalse"): ("itertools", "filterfalse"),
    ("itertools", "izip_longest"): ("itertools", "zip_longest"),
    ("UserDict", "IterableUserDict"): ("collections", "UserDict"),
    ("UserList", "UserList"): ("collections", "UserList"),
    ("UserString", "UserString"): ("collections", "UserString"),
    # Non-mutual mappings.
    ("__builtin__", "basestring"): ("builtins", "str"),
    ("exceptions", "StandardError"): ("builtins", "Exception"),
    ("UserDict", "UserDict"): ("collections", "UserDict"),
}