# mypy: ignore-errors

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import functools
import itertools
import os
import threading
from functools import partial
from typing import Any, Callable, List, Optional, Tuple, Union

import torch
from torch import Tensor
from torch._C._functorch import (
    _add_batch_dim,
    _remove_batch_dim,
    _vmap_decrement_nesting,
    _vmap_increment_nesting,
    is_batchedtensor,
)
from torch.utils._pytree import (
    _broadcast_to_and_flatten,
    tree_flatten,
    tree_map_,
    tree_unflatten,
    TreeSpec,
)

in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]


def doesnt_support_saved_tensors_hooks(f):
    message = (
        "torch.func transforms don't yet support saved tensor hooks. "
        "Please open an issue with your use case."
    )

    @functools.wraps(f)
    def fn(*args, **kwargs):
        with torch.autograd.graph.disable_saved_tensors_hooks(message):
            return f(*args, **kwargs)

    return fn


# Checks that all args-to-be-batched have the same batch dim size
def _validate_and_get_batch_size(
    flat_in_dims: List[Optional[int]], flat_args: List
) -> int:
    batch_sizes = [
        arg.size(in_dim)
        for in_dim, arg in zip(flat_in_dims, flat_args)
        if in_dim is not None
    ]
    if len(batch_sizes) == 0:
        raise ValueError("vmap: Expected at least one Tensor to vmap over")
    if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
        raise ValueError(
            f"vmap: Expected all tensors to have the same size in the mapped "
            f"dimension, got sizes {batch_sizes} for the mapped dimension"
        )
    return batch_sizes[0]
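

# Illustration (hypothetical shapes): with flat_in_dims=[0, None] and
# flat_args=[torch.randn(3, 2), torch.randn(2)], this returns 3; mapped
# tensors whose sizes disagree along their in_dim raise the ValueError above.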


def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
    if isinstance(batched_outputs, tuple):
        return len(batched_outputs)
    return 1


# If value is a tuple, check it has length `num_elements`.
# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
def _as_tuple(
    value: Any, num_elements: int, error_message_lambda: Callable[[], str]
) -> Tuple:
    if not isinstance(value, tuple):
        return (value,) * num_elements
    if len(value) != num_elements:
        raise ValueError(error_message_lambda())
    return value
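

# For instance (hypothetical values), _as_tuple(0, 3, ...) broadcasts to
# (0, 0, 0), while a tuple argument of the wrong length triggers
# error_message_lambda instead.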


def _process_batched_inputs(
    in_dims: in_dims_t, args: Tuple, func: Callable
) -> Tuple[int, List[Any], List[Any], TreeSpec]:
    if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
            f"expected `in_dims` to be int or a (potentially nested) tuple "
            f"matching the structure of inputs, got: {type(in_dims)}."
        )
    if len(args) == 0:
        raise ValueError(
            f"vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add "
            f"inputs, or you are trying to vmap over a function with no inputs. "
            f"The latter is unsupported."
        )

    flat_args, args_spec = tree_flatten(args)
    flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
    if flat_in_dims is None:
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
            f"in_dims is not compatible with the structure of `inputs`. "
            f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs "
            f"has structure {args_spec}."
        )

    for i, (arg, in_dim) in enumerate(zip(flat_args, flat_in_dims)):
        if not isinstance(in_dim, int) and in_dim is not None:
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for an input but in_dim must be either "
                f"an integer dimension or None."
            )
        if isinstance(in_dim, int) and not isinstance(arg, Tensor):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for an input but the input is of type "
                f"{type(arg)}. We cannot vmap over non-Tensor arguments, "
                f"please use None as the respective in_dim"
            )
        if in_dim is not None and (in_dim < -arg.dim() or in_dim >= arg.dim()):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for some input, but that input is a Tensor "
                f"of dimensionality {arg.dim()} so expected in_dim to satisfy "
                f"-{arg.dim()} <= in_dim < {arg.dim()}."
            )
        if in_dim is not None and in_dim < 0:
            flat_in_dims[i] = in_dim % arg.dim()

    return (
        _validate_and_get_batch_size(flat_in_dims, flat_args),
        flat_in_dims,
        flat_args,
        args_spec,
    )
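

# Note: negative in_dims have been normalized above (e.g., a hypothetical
# in_dim=-1 on a 2-D input becomes 1), so downstream code only ever sees
# non-negative dims or None.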


# Creates BatchedTensors for every Tensor in arg that should be batched.
# Returns the (potentially) batched arguments in the original pytree structure.
def _create_batched_inputs(
    flat_in_dims: List[Any], flat_args: List[Any], vmap_level: int, args_spec
) -> Tuple:
    # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    batched_inputs = [
        arg if in_dim is None else _add_batch_dim(arg, in_dim, vmap_level)
        for in_dim, arg in zip(flat_in_dims, flat_args)
    ]
    return tree_unflatten(batched_inputs, args_spec)


def _maybe_remove_batch_dim(name, batched_output, vmap_level, batch_size, out_dim):
    if out_dim is None:
        if isinstance(batched_output, torch.Tensor) and is_batchedtensor(
            batched_output
        ):
            raise ValueError(
                f"vmap({name}, ...): `{name}` cannot return a "
                f"BatchedTensor when out_dim is None"
            )
        return batched_output

    # out_dim is not None
    if not isinstance(batched_output, torch.Tensor):
        raise ValueError(
            f"vmap({name}, ...): `{name}` must only return "
            f"Tensors, got type {type(batched_output)}. "
            "Did you mean to set out_dims=None for this output?"
        )
    return _remove_batch_dim(batched_output, vmap_level, batch_size, out_dim)


# Undoes the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(
    batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
    out_dims: out_dims_t,
    vmap_level: int,
    batch_size: int,
    func: Callable,
) -> Tuple:
    flat_batched_outputs, output_spec = tree_flatten(batched_outputs)

    def incompatible_error():
        raise ValueError(
            f"vmap({_get_name(func)}, ..., out_dims={out_dims})(<inputs>): "
            f"out_dims is not compatible with the structure of `outputs`. "
            f"out_dims has structure {tree_flatten(out_dims)[1]} but outputs "
            f"has structure {output_spec}."
        )

    if isinstance(batched_outputs, torch.Tensor):
        # Some weird edge case requires us to spell out the following;
        # see test_out_dims_edge_case
        if isinstance(out_dims, int):
            flat_out_dims = [out_dims]
        elif isinstance(out_dims, tuple) and len(out_dims) == 1:
            flat_out_dims = out_dims
        elif out_dims is None:
            flat_out_dims = [out_dims]
        else:
            incompatible_error()
    else:
        flat_out_dims = _broadcast_to_and_flatten(out_dims, output_spec)
        if flat_out_dims is None:
            incompatible_error()

    flat_outputs = [
        _maybe_remove_batch_dim(
            _get_name(func), batched_output, vmap_level, batch_size, out_dim
        )
        for batched_output, out_dim in zip(flat_batched_outputs, flat_out_dims)
    ]
    return tree_unflatten(flat_outputs, output_spec)


def _check_int_or_none(x, func, out_dims):
    if isinstance(x, int):
        return
    if x is None:
        return
    raise ValueError(
        f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
        f"an int, None or a python collection of ints representing where in the outputs the "
        f"vmapped dimension should appear."
    )


def _check_out_dims_is_int_or_int_pytree(out_dims: out_dims_t, func: Callable) -> None:
    if isinstance(out_dims, int):
        return
    tree_map_(partial(_check_int_or_none, func=func, out_dims=out_dims), out_dims)


def _get_name(func: Callable):
    if hasattr(func, "__name__"):
        return func.__name__
    # Not all callables have a __name__; a callable created via functools.partial
    # or an nn.Module, to name two examples, doesn't have one.
    return repr(func)


DECOMPOSITIONS_LOADED = False
DECOMPOSITIONS_LOCK = threading.Lock()
VMAP_DECOMPOSITIONS_LIB = None


# torch.package, Python 3.11, and torch.jit-less environments are unhappy with
# decompositions. Only load them when needed if possible.
def lazy_load_decompositions():
    global DECOMPOSITIONS_LOADED
    if DECOMPOSITIONS_LOADED:
        return

    with DECOMPOSITIONS_LOCK:
        if DECOMPOSITIONS_LOADED:
            return

        if not (os.environ.get("PYTORCH_JIT", "1") == "1" and __debug__):
            DECOMPOSITIONS_LOADED = True
            return

        # use an alternate way to register an operator into the decomposition table
        # _register_jit_decomposition doesn't work for some operators, e.g. addr,
        # because the Tensor types generated cannot be unioned by torchscript
        # decomp should be of type OpOverload
        global VMAP_DECOMPOSITIONS_LIB
        VMAP_DECOMPOSITIONS_LIB = torch.library.Library(
            "aten", "IMPL", "FuncTorchBatched"
        )

        from torch._decomp import decomposition_table

        def _register_python_decomposition_vmap(decomp):
            if decomp in decomposition_table:
                VMAP_DECOMPOSITIONS_LIB.impl(decomp, decomposition_table[decomp])
            else:
                raise RuntimeError(f"could not find decomposition for {decomp}")

        _register_python_decomposition_vmap(torch.ops.aten.mse_loss_backward.default)
        _register_python_decomposition_vmap(
            torch.ops.aten.smooth_l1_loss_backward.default
        )
        _register_python_decomposition_vmap(torch.ops.aten.huber_loss_backward.default)
        _register_python_decomposition_vmap(torch.ops.aten.nll_loss_forward.default)
        _register_python_decomposition_vmap(torch.ops.aten.nll_loss2d_forward.default)
        _register_python_decomposition_vmap(torch.ops.aten.nll_loss_backward.default)
        _register_python_decomposition_vmap(torch.ops.aten.nll_loss2d_backward.default)
        _register_python_decomposition_vmap(torch.ops.aten.addr.default)

        DECOMPOSITIONS_LOADED = True


def vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs):
    lazy_load_decompositions()
    _check_out_dims_is_int_or_int_pytree(out_dims, func)
    batch_size, flat_in_dims, flat_args, args_spec = _process_batched_inputs(
        in_dims, args, func
    )

    if chunk_size is not None:
        chunks_flat_args = _get_chunked_inputs(
            flat_args, flat_in_dims, batch_size, chunk_size
        )
        return _chunked_vmap(
            func,
            flat_in_dims,
            chunks_flat_args,
            args_spec,
            out_dims,
            randomness,
            **kwargs,
        )

    # If chunk_size is not specified.
    return _flat_vmap(
        func,
        batch_size,
        flat_in_dims,
        flat_args,
        args_spec,
        out_dims,
        randomness,
        **kwargs,
    )
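

# Rough sketch of how this entry point is usually reached via the public API
# (torch.vmap forwards its arguments here; shapes below are hypothetical):
#   >>> x = torch.randn(6, 5)
#   >>> torch.vmap(torch.sum)(x)                # one _flat_vmap over dim 0
#   >>> torch.vmap(torch.sum, chunk_size=2)(x)  # _chunked_vmap over 3 chunks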


def get_chunk_sizes(total_elems, chunk_size):
    n_chunks = total_elems // chunk_size
    chunk_sizes = [chunk_size] * n_chunks
    # remainder chunk
    remainder = total_elems % chunk_size
    if remainder != 0:
        chunk_sizes.append(remainder)
    return chunk_sizes
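

# e.g. get_chunk_sizes(10, 4) -> [4, 4, 2]: two full chunks plus a remainder
# chunk (values illustrative).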


def _get_chunked_inputs(flat_args, flat_in_dims, batch_size, chunk_size):
    split_idxs = (batch_size,)
    if chunk_size is not None:
        chunk_sizes = get_chunk_sizes(batch_size, chunk_size)
        split_idxs = tuple(itertools.accumulate(chunk_sizes))

    flat_args_chunks = tuple(
        t.tensor_split(split_idxs, dim=in_dim)
        if in_dim is not None
        else [t] * len(split_idxs)
        for t, in_dim in zip(flat_args, flat_in_dims)
    )
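    # Example (hypothetical, two args a and b split into three chunks each):
    # flat_args_chunks == ((a0, a1, a2), (b0, b1, b2)); the zip(*...) below
    # yields (a0, b0), (a1, b1), (a2, b2) -- one flat arg list per chunk.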
    # transpose chunk dim and flatten structure
    # chunks_flat_args is a sequence of flattened args, one entry per chunk
    chunks_flat_args = zip(*flat_args_chunks)
    return chunks_flat_args


def _flatten_chunks_output(chunks_output_):
    # chunks_output is a list of chunked outputs
    # flatten chunked outputs:
    flat_chunks_output = []
    arg_spec = None
    for output in chunks_output_:
        flat_output, arg_specs = tree_flatten(output)
        flat_chunks_output.append(flat_output)
        if arg_spec is None:
            arg_spec = arg_specs

    # transpose chunk dim and flatten structure
    # flat_output_chunks is a flat list of chunks
    flat_output_chunks = list(zip(*flat_chunks_output))
    return flat_output_chunks, arg_spec


def _concat_chunked_outputs(out_dims, arg_spec, flat_output_chunks):
    # concat chunks on out_dim
    flat_out_dims = _broadcast_to_and_flatten(out_dims, arg_spec)
    assert len(flat_out_dims) == len(flat_output_chunks)
    flat_output = []
    for idx, out_dim in enumerate(flat_out_dims):
        flat_output.append(torch.cat(flat_output_chunks[idx], dim=out_dim))
        # release tensors
        flat_output_chunks[idx] = None
    return flat_output


# Applies vmap on chunked_input and returns concatenated output over the chunks.
def _chunked_vmap(
    func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs
):
    chunks_output = []
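    # For randomness="same", snapshot the RNG state once and restore it before
    # each chunk below, so every chunk sees the same random stream that a
    # single unchunked vmap call would see.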
    rs = torch.get_rng_state() if randomness == "same" else None
    for flat_args in chunks_flat_args:
        batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)

        # Given how `_get_chunked_inputs` splits the input, we may get a chunk
        # with a batch size of `0`. We skip any computation in that case.
        # Eg.
        # >>> chunk_size = 1
        # >>> batch_size = 6
        # >>> t = torch.zeros(batch_size, 1)
        # >>> t.tensor_split([1, 2, 3, 4, 5, 6])
        # (tensor([[0.]]), tensor([[0.]]), tensor([[0.]]), tensor([[0.]]),
        #  tensor([[0.]]), tensor([[0.]]), tensor([], size=(0, 1)))
        if batch_size == 0:
            continue

        if rs is not None:
            torch.set_rng_state(rs)
        chunks_output.append(
            _flat_vmap(
                func,
                batch_size,
                flat_in_dims,
                flat_args,
                args_spec,
                out_dims,
                randomness,
                **kwargs,
            )
        )

    flat_output_chunks, arg_spec = _flatten_chunks_output(chunks_output)

    # chunked output tensors are held by both `flat_output_chunks` and `chunks_output`.
    # eagerly remove the reference from `chunks_output`.
    del chunks_output

    # concat chunks on out_dim
    flat_output = _concat_chunked_outputs(out_dims, arg_spec, flat_output_chunks)

    # finally unflatten the output
    return tree_unflatten(flat_output, arg_spec)


# Vmap refactored helper functions:
def _check_randomness_arg(randomness):
    if randomness not in ["error", "different", "same"]:
        raise RuntimeError(
            f"Only allowed values for randomness are 'error', 'different', or 'same'. Got {randomness}"
        )
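

# Pairs _vmap_increment_nesting with a guaranteed _vmap_decrement_nesting so
# that the nesting level is popped even if the wrapped computation raises.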
@contextlib.contextmanager
def vmap_increment_nesting(batch_size, randomness):
    try:
        vmap_level = _vmap_increment_nesting(batch_size, randomness)
        yield vmap_level
    finally:
        _vmap_decrement_nesting()


@doesnt_support_saved_tensors_hooks
def _flat_vmap(
    func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs
):
    with vmap_increment_nesting(batch_size, randomness) as vmap_level:
        batched_inputs = _create_batched_inputs(
            flat_in_dims, flat_args, vmap_level, args_spec
        )
        batched_outputs = func(*batched_inputs, **kwargs)
        return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func)


# `restore_vmap` is a private helper function. It is vmap but with the
# following differences:
# - instead of returning outputs, it returns an (outputs, out_dims) tuple.
#   out_dims is a pytree of the same shape as outputs and contains Optional[int]
#   specifying where the vmapped dimension, if it exists, is in the corresponding output.
# - does no validation on in_dims or inputs (vmap expects at least one Tensor to be vmapped).
#   restore_vmap allows for no inputs to have the vmap dimension
# - does no validation on outputs (vmap expects only Tensor outputs)
#   restore_vmap allows for return of arbitrary outputs (not just Tensors)
#
# The TL;DR is that restore_vmap is more general than vmap and has a slightly
# different API. The relaxations are so that we can "pause" vmap in the middle
# of its execution and then "restore" it later (this is what we do in
# the generate_vmap_rule=True implementation of autograd.Function).
#
# restore_vmap could technically be used in the implementation of vmap, but
# doing that refactor is a bit technically challenging because:
# - vmap couples the tensor-wrapping code with error checking
# - vmap's tensor unwrapping code is in C++; we would need to rewrite part of it
#   in Python because it overlaps with unwrap_batched
@doesnt_support_saved_tensors_hooks
def restore_vmap(func, in_dims, batch_size, randomness):
    def inner(*args, **kwargs):
        with vmap_increment_nesting(batch_size, randomness) as vmap_level:
            batched_inputs = wrap_batched(args, in_dims, vmap_level)
            batched_outputs = func(*batched_inputs, **kwargs)
            return unwrap_batched(batched_outputs, vmap_level)

    return inner
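

# Illustrative sketch (hypothetical shapes): with batch_size=3,
#   outputs, out_dims = restore_vmap(torch.sin, (0,), 3, "error")(torch.randn(3, 4))
# gives outputs of shape (3, 4) with out_dims == 0; a function returning a
# non-Tensor output would get that output back unchanged with an out_dim of None.


# Wraps each Tensor in `args` whose corresponding entry in `bdims` is not None
# into a BatchedTensor at the given vmap `level`.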
def wrap_batched(args, bdims, level):
    flat_args, spec = tree_flatten(args)
    flat_bdims = _broadcast_to_and_flatten(bdims, spec)
    assert flat_bdims is not None
    result = _create_batched_inputs(flat_bdims, flat_args, level, spec)
    return result
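

# Inverse of `wrap_batched`: peels the batch dimension at `level` off every
# BatchedTensor in `args`, returning (values, bdims) pytrees; non-Tensor leaves
# pass through with a bdim of None.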
def unwrap_batched(args, level):
    flat_args, spec = tree_flatten(args)
    if len(flat_args) == 0:
        return args, ()
    result = [
        torch._C._functorch._unwrap_batched(arg, level)
        if isinstance(arg, torch.Tensor)
        else (arg, None)
        for arg in flat_args
    ]
    output, bdims = zip(*result)
    return tree_unflatten(output, spec), tree_unflatten(bdims, spec)