# mypy: allow-untyped-defs
# Copyright (c) Meta Platforms, Inc. and affiliates
import logging
from typing import Any, Dict, List, Optional, Tuple

import torch
from torch.fx.node import map_aggregate
from torch.utils._pytree import tree_flatten, tree_unflatten

__all__ = [
    "TensorChunkSpec",
    "split_args_kwargs_into_chunks",
    "merge_chunks",
]

logger = logging.getLogger(__name__)

"""
_debug_mask_minibatches specifies to send masked versions of the mini-batch
through instead of micro-batch slices -- this can be used for more stable
numerical testing (see [A Note About Correctness Testing])
"""
_debug_mask_minibatches = False
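# A minimal sketch of the difference (illustrative only): for
#   v = torch.arange(4)
# two micro-batch slices along dim 0 are
#   [tensor([0, 1]), tensor([2, 3])]
# while the masked versions are full-size tensors zeroed outside each slice:
#   [tensor([0, 1, 0, 0]), tensor([0, 0, 2, 3])]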


class _CustomReducer:
    """
    Custom reducer class that can be used to specify a custom operation that
    reduces losses of multiple microbatches into one value.

    Example:
    >>> # xdoctest: +SKIP
    >>> sum_reducer = _CustomReducer(
    >>>     torch.tensor(0.0),
    >>>     lambda a, b: a + b
    >>> )
    """

    def __init__(self, init_value, reduce_fn):
        self.init_value = init_value
        self.reduce_fn = reduce_fn


class _LossReducer(_CustomReducer):
    pass


sum_reducer = _LossReducer(torch.tensor(0.0), lambda a, b: a + b)
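# `merge_chunks` below folds `reduce_fn` over the per-chunk values starting
# from `init_value`; e.g. for two microbatch losses l0 and l1, `sum_reducer`
# yields reduce_fn(reduce_fn(init_value, l0), l1) == l0 + l1.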


# Default chunking dimension is 0. This is used for the case where the user did
# not specify a chunking dimension.
DEFAULT_CHUNK_DIM = 0


class TensorChunkSpec:
    """
    Class used to specify chunking of inputs
    """

    split_dim: int

    def __init__(self, split_dim):
        self.split_dim = split_dim

    def __repr__(self):
        return (
            f"{self.__class__.__module__}.{self.__class__.__name__}({self.split_dim})"
        )

    def __str__(self):
        return f"TensorChunkSpec({self.split_dim})"
    @staticmethod
    def from_tuple(
        chunk_dims: Tuple[int, ...],
    ):
        """
        A helper for creating a tuple of `TensorChunkSpec` from a tuple of chunk
        dimensions (int's).

        Example:
        >>> # xdoctest: +SKIP
        >>> # There are three positional arguments to the model, and
        >>> # we are chunking them along dimension 0, 0 and 1, respectively
        >>> args_chunk_spec = TensorChunkSpec.from_tuple((0, 0, 1))
        """
        args_chunk_spec = map_aggregate(
            chunk_dims,
            lambda dim: TensorChunkSpec(dim),
        )
        return args_chunk_spec

    @staticmethod
    def from_dict(
        chunk_dims: Dict[str, int],
    ):
        """
        A helper for creating a dictionary of `TensorChunkSpec` from a
        dictionary of chunk dimensions (int's).

        Example:
        >>> # xdoctest: +SKIP
        >>> # Chunk dimension 0 for the "id" argument, 1 for the "mask" argument
        >>> kwargs_chunk_spec = TensorChunkSpec.from_dict({"id": 0, "mask": 1})
        """
        kwargs_chunk_spec = map_aggregate(
            chunk_dims,
            lambda dim: TensorChunkSpec(dim),
        )
        return kwargs_chunk_spec


# Class used to specify replication of inputs
class _Replicate:
    pass


def _shard_dict_of_args(
    args_dict,
    args_chunk_spec,
    num_chunks,
):
    """
    Given a dictionary of args, and a dictionary of chunking specs, shard the
    args according to the chunking specs.

    Args:
        args_dict: Dictionary of args
        args_chunk_spec: Dictionary of chunking specs
        num_chunks: Number of chunks to shard the args into

    Returns:
        args_split: List of sharded args
    """
    # Stage 1+2: flatten and shard/replicate
    # args_sharded_replicated : [num args, num flat values, num chunks]
    args_sharded_replicated = {}
    arg_specs = []

    real_num_chunks = num_chunks
    first_tensor = True

    assert len(args_dict) == len(
        args_chunk_spec
    ), f"args_dict.keys() = {list(args_dict.keys())} args_chunk_spec.keys() = {list(args_chunk_spec.keys())}"

    for arg_key, arg in args_dict.items():
        flat, spec = tree_flatten(arg)
        arg_specs.append(spec)

        chunk_spec = args_chunk_spec[arg_key]
        assert chunk_spec is not None  # Should have been set by caller
        chunk_spec_flat, _ = tree_flatten(chunk_spec)
        if len(flat) != len(chunk_spec_flat):
            raise ValueError(
                f"Argument value {arg} did not have the same number of "
                f"values as chunk spec {chunk_spec}"
            )

        sharded_arg_flat = []

        for v, chunk_v in zip(flat, chunk_spec_flat):
            if chunk_v is _Replicate or not isinstance(v, torch.Tensor):
                sharded_arg_flat.append([v] * real_num_chunks)
            elif isinstance(chunk_v, TensorChunkSpec):
                # TODO: check type of v. If it's a tensor, use chunk (or debug mask).
                # If it's a collection type, split it as you would expect. Otherwise,
                # throw an error.
                assert isinstance(v, torch.Tensor), f"{v} is not a tensor"

                v_split_dim_size = v.size(chunk_v.split_dim)
                if v_split_dim_size < real_num_chunks:
                    if first_tensor:
                        # We can only adjust the number of chunks when we hit this
                        # issue at the first tensor encountered
                        logger.warning(
                            f"Tensor size on chunking dimension is {v_split_dim_size}, "  # noqa: G004
                            f"downsizing the number of chunks from {num_chunks} to {v_split_dim_size}."
                        )
                        real_num_chunks = v_split_dim_size
                    else:
                        raise RuntimeError(
                            f"Arg {arg_key} on chunking dimension has a size of {v_split_dim_size}, "
                            f"smaller than the number of chunks {num_chunks}. "
                            "PiPPy cannot reduce the number of chunks because "
                            "other arguments have bigger chunk-dimension sizes. "
                            "Please adjust your num_chunks setting."
                        )

                chunk_tensors = torch.tensor_split(
                    v, real_num_chunks, chunk_v.split_dim
                )

                if _debug_mask_minibatches:
                    expanded_chunks = []

                    split_dim_idx = 0
                    for chunk_tensor in chunk_tensors:
                        new_val = torch.zeros_like(v)
                        upper_idx = split_dim_idx + chunk_tensor.size(chunk_v.split_dim)

                        slice_indices = [slice(None, None, None)] * new_val.ndim
                        slice_indices[chunk_v.split_dim] = slice(
                            split_dim_idx, upper_idx
                        )
                        new_val[slice_indices] = chunk_tensor

                        expanded_chunks.append(new_val)

                        split_dim_idx += chunk_tensor.size(chunk_v.split_dim)

                    sharded_arg_flat.append(expanded_chunks)
                else:
                    sharded_arg_flat.append(chunk_tensors)  # type: ignore[arg-type]

                first_tensor = False
            else:
                raise TypeError(f"Unrecognized chunk spec: {chunk_v}")

        args_sharded_replicated[arg_key] = sharded_arg_flat

    # chunks_flat : [num chunks, num args, num flat values]
    chunks_flat = []
    for chunk_idx in range(real_num_chunks):
        chunk_args = {}
        for key, arg in args_sharded_replicated.items():
            arg_single_chunk = []
            for v_flat in arg:
                arg_single_chunk.append(v_flat[chunk_idx])
            chunk_args[key] = arg_single_chunk
        chunks_flat.append(chunk_args)

    # args_split : [num chunks, num args]
    args_split = []
    for chunk in chunks_flat:
        per_chunk_args = {}
        assert len(arg_specs) == len(chunk)
        for (key, arg), arg_spec in zip(chunk.items(), arg_specs):
            per_chunk_args[key] = tree_unflatten(arg, arg_spec)
        args_split.append(per_chunk_args)

    return args_split
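

# Illustrative sketch of `_shard_dict_of_args` (assumed inputs, not from the
# original source): two tensor args sharded into 2 chunks along dim 0.
#
#   _shard_dict_of_args(
#       {"x": torch.randn(4, 8), "y": torch.randn(4)},
#       {"x": TensorChunkSpec(0), "y": TensorChunkSpec(0)},
#       2,
#   )
#   # -> [{"x": <2 x 8 tensor>, "y": <2-elem tensor>},
#   #     {"x": <2 x 8 tensor>, "y": <2-elem tensor>}]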


def split_args_kwargs_into_chunks(
    args: Tuple[Any, ...],
    kwargs: Optional[Dict[str, Any]],
    chunks: int,
    args_chunk_spec: Optional[Tuple[TensorChunkSpec, ...]] = None,
    kwargs_chunk_spec: Optional[Dict[str, TensorChunkSpec]] = None,
) -> Tuple[List[Tuple], List[Dict]]:
    """
    Given a sequence of args and kwargs, split them into a number of chunks
    according to their respective chunking specs.

    Args:
        args: Tuple of args
        kwargs: Dict of kwargs
        chunks: Number of chunks to split the args and kwargs into
        args_chunk_spec: chunking specs for args, in same shape as args
        kwargs_chunk_spec: chunking specs for kwargs, in same shape as kwargs

    Returns:
        args_split: List of sharded args
        kwargs_split: List of sharded kwargs
    """
    # Given `args` and `kwargs`, we want to yield a set of `chunks` args and kwargs such that
    # the constituent Tensor values have been sharded/replicated according to the `args_chunk_spec`
    # and `kwargs_chunk_spec` specifications. The steps are as follows:
    #
    # 1. Use pytree.tree_flatten to flatten each arg and its spec into a 1d array of values.
    #    To use a running example: suppose our inputs look like
    #
    #       args = ([A, [B, C]], D)   args_spec = ([None, [None, TensorChunkSpec]], None)
    #       (kwargs not shown but it's a similar process)
    #
    #    Then for this step we would end up with
    #
    #       args = ([A, B, C], D)   args_spec = ([None, None, TensorChunkSpec], None)
    #
    # 2. Shard or replicate the arguments subject to the policy in the spec. Suppose chunks = 2
    #
    #       args = ([[A, A], [B, B], [C_1, C_2]], [D, D])
    #
    # 3. Rotate the nesting order such that chunks are the outer dimension
    #
    #       args_chunks = [
    #           ([A, B, C_1], D),
    #           ([A, B, C_2], D),
    #       ]
    #
    # 4. Unflatten each chunk according to the spec
    #
    #       args_chunks = [
    #           ([A, [B, C_1]], D),
    #           ([A, [B, C_2]], D),
    #       ]

    # TODO: _debug_mask_minibatches

    # Handle the case where kwargs is None
    if kwargs is None:
        kwargs = {}

    # If user did not provide args_chunk_spec or kwargs_chunk_spec, we extend
    # their format and use default chunking along dim 0
    if args_chunk_spec is None:
        args_chunk_spec = (TensorChunkSpec(DEFAULT_CHUNK_DIM),) * len(args)

    if kwargs_chunk_spec is None:
        kwargs_chunk_spec = dict.fromkeys(kwargs, TensorChunkSpec(DEFAULT_CHUNK_DIM))

    args_split_dict = _shard_dict_of_args(
        dict(enumerate(args)),
        dict(enumerate(args_chunk_spec)),
        chunks,
    )
    real_num_chunks = len(args_split_dict)

    kwargs_split = _shard_dict_of_args(
        kwargs,
        kwargs_chunk_spec,
        real_num_chunks,
    )

    if len(kwargs_split) < real_num_chunks:
        # In case kwargs are sharded into fewer chunks,
        # e.g. when `args` has no tensor, just values
        real_num_chunks = len(kwargs_split)
        # Re-shard args
        args_split_dict = _shard_dict_of_args(
            dict(enumerate(args)),
            dict(enumerate(args_chunk_spec)),
            real_num_chunks,
        )

    if len(args_split_dict) != len(kwargs_split):
        raise RuntimeError(
            "args and kwargs are split into different number of chunks: "
            f"{len(args_split_dict)}, {len(kwargs_split)}"
        )

    args_split = []
    for chunk_args in args_split_dict:
        args_split.append(tuple(chunk_args[i] for i in range(len(chunk_args))))

    return args_split, kwargs_split
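

# Illustrative usage (a minimal sketch with assumed shapes): splitting a batch
# of 8 rows into 4 microbatches along dim 0.
#
#   args = (torch.randn(8, 16),)
#   kwargs = {"mask": torch.ones(8, 16)}
#   args_split, kwargs_split = split_args_kwargs_into_chunks(args, kwargs, 4)
#   len(args_split)                 # 4
#   args_split[0][0].shape          # torch.Size([2, 16])
#   kwargs_split[0]["mask"].shape   # torch.Size([2, 16])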


def merge_chunks(
    chunks: List[Any],
    chunk_spec,
):
    """
    Given a list of chunks, merge them into a single value according to
    the chunk spec.

    Args:
        chunks: list of chunks
        chunk_spec: Chunking spec for the chunks

    Returns:
        value: Merged value
    """
    # This is essentially the inverse of `split_args_kwargs_into_chunks`, so the
    # steps are similar to the steps in that function but in reverse. Given the
    # input values:
    #
    #       chunks = [
    #           ([A, [B, C_1]], D),
    #           ([A, [B, C_2]], D),
    #       ]
    #       args_spec = ([None, [None, TensorChunkSpec]], None)
    #
    # 1. Flatten the chunks according to the chunk_spec
    #
    #       chunks_flat = [
    #           ([A, B, C_1], D),
    #           ([A, B, C_2], D),
    #       ]
    #
    # 2. Rotate the nesting order such that chunks are the inner dimension
    #
    #       value_inner = ([A, B, [C_1, C_2]], D)
    #
    # 3. Concatenate sharded arguments
    #
    #       value_combined = ([A, B, C], D)
    #
    # 4. Unflatten the combined args given the spec
    #
    #       value = ([A, [B, C]], D)

    # Preliminary: flatten the chunk spec
    if chunk_spec is not None:
        spec_flattened, flatten_spec = tree_flatten(chunk_spec)
    else:
        # If chunk_spec is not provided, we will merge chunks along the default
        # dimension (0) for all output fields. We obtain the output structure by
        # flattening chunk 0 and generating the chunk_spec from it.
        chunk0_flat, flatten_spec = tree_flatten(chunks[0])
        spec_flattened = [TensorChunkSpec(DEFAULT_CHUNK_DIM)] * len(chunk0_flat)

    # Stage 1: flatten chunks
    # chunks_flattened : [num chunks, num args]
    chunks_flattened = []

    for chunk in chunks:
        chunk_flattened, _ = tree_flatten(chunk)
        if len(chunk_flattened) != len(spec_flattened):
            raise ValueError(f"Chunk {chunk} did not match chunk spec {chunk_spec}")

        chunks_flattened.append(chunk_flattened)

    # Stage 2 and 3: Rotate nesting order s.t. chunks are inner dimension and
    # concatenate sharded operands
    # args_flattened : [num args]
    args_flattened = []
    for arg_idx, arg in enumerate(spec_flattened):
        if isinstance(arg, TensorChunkSpec):
            partial_values = [
                chunks_flattened[chunk_idx][arg_idx]
                for chunk_idx in range(len(chunks_flattened))
            ]

            if _debug_mask_minibatches:
                # Infer size of individual chunks by running `tensor_split` again
                overall_shape = partial_values[0].shape
                for val in partial_values[1:]:
                    assert val.shape == overall_shape
                meta_chunks = torch.tensor_split(
                    torch.empty(*overall_shape, device="meta"),
                    sections=len(partial_values),
                    dim=arg.split_dim,
                )

                values_to_cat = []
                chunk_start_idx = 0
                assert len(partial_values) == len(meta_chunks)
                for partial_value, meta_chunk in zip(partial_values, meta_chunks):
                    chunk_end_idx = chunk_start_idx + meta_chunk.size(arg.split_dim)

                    slice_indices = [slice(None, None, None)] * partial_value.ndim
                    slice_indices[arg.split_dim] = slice(chunk_start_idx, chunk_end_idx)
                    sliced = partial_value[slice_indices]
                    values_to_cat.append(sliced)

                    chunk_start_idx = chunk_end_idx
            else:
                values_to_cat = partial_values

            args_flattened.append(torch.cat(values_to_cat, dim=arg.split_dim))
        elif isinstance(arg, _CustomReducer):
            reduced_val = arg.init_value

            for chunk_idx in range(len(chunks_flattened)):
                reduced_val = arg.reduce_fn(
                    reduced_val, chunks_flattened[chunk_idx][arg_idx]
                )

            args_flattened.append(reduced_val)
        else:
            value = chunks_flattened[0][arg_idx]

            for chunk_idx in range(1, len(chunks_flattened)):
                assert chunks_flattened[chunk_idx][arg_idx] == value

            args_flattened.append(value)

    # Stage 4: Unflatten combined args
    return tree_unflatten(args_flattened, flatten_spec)
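

# Illustrative usage (a minimal sketch continuing the example above): merging
# four (2, 16) microbatch outputs back into one (8, 16) batch along dim 0.
#
#   out = merge_chunks(
#       [torch.randn(2, 16) for _ in range(4)],
#       TensorChunkSpec(0),
#   )
#   out.shape  # torch.Size([8, 16])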