# api.py

  1. """
  2. This file includes public APIs for FSDP such as the classes used for the
  3. constructor arguments.
  4. """
  5. from dataclasses import dataclass
  6. from enum import auto, Enum
  7. from typing import Optional, Sequence, Type
  8. import torch
  9. from torch.nn.modules.batchnorm import _BatchNorm
  10. __all__ = [
  11. "ShardingStrategy",
  12. "BackwardPrefetch",
  13. "MixedPrecision",
  14. "CPUOffload",
  15. "StateDictType",
  16. "StateDictConfig",
  17. "FullStateDictConfig",
  18. "LocalStateDictConfig",
  19. "ShardedStateDictConfig",
  20. "OptimStateDictConfig",
  21. "FullOptimStateDictConfig",
  22. "LocalOptimStateDictConfig",
  23. "ShardedOptimStateDictConfig",
  24. "StateDictSettings",
  25. ]
class ShardingStrategy(Enum):
    """
    This specifies the sharding strategy to be used for distributed training by
    :class:`FullyShardedDataParallel`.

    - ``FULL_SHARD``: Parameters, gradients, and optimizer states are sharded.
      For the parameters, this strategy unshards (via all-gather) before the
      forward, reshards after the forward, unshards before the backward
      computation, and reshards after the backward computation. For gradients,
      it synchronizes and shards them (via reduce-scatter) after the backward
      computation. The sharded optimizer states are updated locally per rank.
    - ``SHARD_GRAD_OP``: Gradients and optimizer states are sharded during
      computation, and additionally, parameters are sharded outside
      computation. For the parameters, this strategy unshards before the
      forward, does not reshard them after the forward, and only reshards them
      after the backward computation. The sharded optimizer states are updated
      locally per rank. Inside ``no_sync()``, the parameters are not resharded
      after the backward computation.
    - ``NO_SHARD``: Parameters, gradients, and optimizer states are not sharded
      but instead replicated across ranks similar to PyTorch's
      :class:`DistributedDataParallel` API. For gradients, this strategy
      synchronizes them (via all-reduce) after the backward computation. The
      unsharded optimizer states are updated locally per rank.
    - ``HYBRID_SHARD``: Apply ``FULL_SHARD`` within a node, and replicate
      parameters across nodes. This results in reduced communication volume as
      expensive all-gathers and reduce-scatters are only done within a node,
      which can be more performant for medium-sized models.
    - ``_HYBRID_SHARD_ZERO2``: Apply ``SHARD_GRAD_OP`` within a node, and
      replicate parameters across nodes. This is like ``HYBRID_SHARD``, except
      this may provide even higher throughput since the unsharded parameters
      are not freed after the forward pass, saving the all-gathers in the
      pre-backward.
    """

    FULL_SHARD = auto()
    SHARD_GRAD_OP = auto()
    NO_SHARD = auto()
    HYBRID_SHARD = auto()
    _HYBRID_SHARD_ZERO2 = auto()


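# Illustrative sketch (not part of the original module): one way a sharding
# strategy might be selected and passed to the FSDP constructor. The model
# and the ``multi_node`` flag are hypothetical placeholders; the
# ``sharding_strategy`` keyword name is assumed to match FSDP's constructor.
def _example_sharding_strategy(model: torch.nn.Module, multi_node: bool) -> torch.nn.Module:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    # HYBRID_SHARD keeps all-gathers/reduce-scatters within a node, which can
    # help medium-sized models on multi-node jobs; FULL_SHARD maximizes memory
    # savings by sharding across all ranks.
    strategy = ShardingStrategy.HYBRID_SHARD if multi_node else ShardingStrategy.FULL_SHARD
    return FSDP(model, sharding_strategy=strategy)

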
class BackwardPrefetch(Enum):
    """
    This configures explicit backward prefetching, which improves throughput by
    enabling communication and computation overlap in the backward pass at the
    cost of slightly increased memory usage.

    - ``BACKWARD_PRE``: This enables the most overlap but increases memory
      usage the most. This prefetches the next set of parameters *before* the
      current set of parameters' gradient computation. This overlaps the *next
      all-gather* and the *current gradient computation*, and at the peak, it
      holds the current set of parameters, next set of parameters, and current
      set of gradients in memory.
    - ``BACKWARD_POST``: This enables less overlap but requires less memory
      usage. This prefetches the next set of parameters *after* the current
      set of parameters' gradient computation. This overlaps the *current
      reduce-scatter* and the *next gradient computation*, and it frees the
      current set of parameters before allocating memory for the next set of
      parameters, only holding the next set of parameters and current set of
      gradients in memory at the peak.
    - FSDP's ``backward_prefetch`` argument accepts ``None``, which disables
      the backward prefetching altogether. This has no overlap and does not
      increase memory usage. In general, we do not recommend this setting since
      it may degrade throughput significantly.

    For more technical context: For a single process group using the NCCL
    backend, any collectives, even if issued from different streams, contend
    for the same per-device NCCL stream, which implies that the relative order
    in which the collectives are issued matters for overlapping. The two
    backward prefetching values correspond to different issue orders.
    """

    # NOTE: For both modes, the ordering that defines "current" and "next" is
    # not always exact in the current implementation. A mistargeted prefetch
    # simply means that the parameter memory is allocated earlier than needed,
    # possibly increasing peak memory usage, but does not affect correctness.
    BACKWARD_PRE = auto()
    BACKWARD_POST = auto()


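# Illustrative sketch (not part of the original module): choosing a backward
# prefetching mode when wrapping a model. ``model`` and ``prefer_speed`` are
# hypothetical placeholders; the ``backward_prefetch`` constructor argument is
# the one described in the docstring above, and passing ``None`` disables
# prefetching.
def _example_backward_prefetch(model: torch.nn.Module, prefer_speed: bool) -> torch.nn.Module:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    # BACKWARD_PRE: most overlap, highest peak memory.
    # BACKWARD_POST: less overlap, lower peak memory.
    prefetch = BackwardPrefetch.BACKWARD_PRE if prefer_speed else BackwardPrefetch.BACKWARD_POST
    return FSDP(model, backward_prefetch=prefetch)

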
@dataclass
class MixedPrecision:
    """
    This configures FSDP-native mixed precision training.

    Attributes:
        param_dtype (Optional[torch.dtype]): This specifies the dtype for model
            parameters during forward and backward and thus the dtype for
            forward and backward computation. Outside forward and backward, the
            *sharded* parameters are kept in full precision (e.g. for the
            optimizer step), and for model checkpointing, the parameters are
            always saved in full precision. (Default: ``None``)
        reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
            gradient reduction (i.e. reduce-scatter or all-reduce). If this is
            ``None`` but ``param_dtype`` is not ``None``, then this takes on
            the ``param_dtype`` value, still running gradient reduction in low
            precision. This is permitted to differ from ``param_dtype``, e.g.
            to force gradient reduction to run in full precision. (Default:
            ``None``)
        buffer_dtype (Optional[torch.dtype]): This specifies the dtype for
            buffers. FSDP does not shard buffers. Rather, FSDP casts them to
            ``buffer_dtype`` in the first forward pass and keeps them in that
            dtype thereafter. For model checkpointing, the buffers are saved
            in full precision except for ``LOCAL_STATE_DICT``. (Default:
            ``None``)
        keep_low_precision_grads (bool): If ``False``, then FSDP upcasts
            gradients to full precision after the backward pass in preparation
            for the optimizer step. If ``True``, then FSDP keeps the gradients
            in the dtype used for gradient reduction, which can save memory if
            using a custom optimizer that supports running in low precision.
            (Default: ``False``)
        cast_forward_inputs (bool): If ``True``, then this FSDP module casts
            its forward args and kwargs to ``param_dtype``. This is to ensure
            that parameter and input dtypes match for forward computation, as
            required by many ops. This may need to be set to ``True`` when only
            applying mixed precision to some but not all FSDP modules, in which
            case a mixed-precision FSDP submodule needs to recast its inputs.
            (Default: ``False``)
        cast_root_forward_inputs (bool): If ``True``, then the root FSDP module
            casts its forward args and kwargs to ``param_dtype``, overriding
            the value of ``cast_forward_inputs``. For non-root FSDP modules,
            this does not do anything. (Default: ``True``)
        _module_classes_to_ignore (Sequence[Type[nn.Module]]): This specifies
            module classes to ignore for mixed precision when using an
            ``auto_wrap_policy``: Modules of these classes will have FSDP
            applied to them separately with mixed precision disabled (meaning
            that the final FSDP construction would deviate from the specified
            policy). If ``auto_wrap_policy`` is not specified, then this does
            not do anything. This API is experimental and subject to change.
            (Default: ``(_BatchNorm,)``)

    .. note:: This API is experimental and subject to change.

    .. note:: Only floating point tensors are cast to their specified dtypes.

    .. note:: In ``summon_full_params``, parameters are forced to full
        precision, but buffers are not.

    .. note:: Layer norm and batch norm accumulate in ``float32`` even when
        their inputs are in a low precision like ``float16`` or ``bfloat16``.
        Disabling FSDP's mixed precision for those norm modules only means that
        the affine parameters are kept in ``float32``. However, this incurs
        separate all-gathers and reduce-scatters for those norm modules, which
        may be inefficient, so if the workload permits, the user should prefer
        to still apply mixed precision to those modules.

    .. note:: By default, if the user passes a model with any ``_BatchNorm``
        modules and specifies an ``auto_wrap_policy``, then the batch norm
        modules will have FSDP applied to them separately with mixed precision
        disabled. See the ``_module_classes_to_ignore`` argument.

    .. note:: ``MixedPrecision`` has ``cast_root_forward_inputs=True`` and
        ``cast_forward_inputs=False`` by default. For the root FSDP instance,
        its ``cast_root_forward_inputs`` takes precedence over its
        ``cast_forward_inputs``. For non-root FSDP instances, their
        ``cast_root_forward_inputs`` values are ignored. The default setting is
        sufficient for the typical case where each FSDP instance has the same
        ``MixedPrecision`` configuration and only needs to cast inputs to the
        ``param_dtype`` at the beginning of the model's forward pass.

    .. note:: For nested FSDP instances with different ``MixedPrecision``
        configurations, we recommend setting individual ``cast_forward_inputs``
        values to configure casting inputs or not before each instance's
        forward. In such a case, since the casts happen before each FSDP
        instance's forward, a parent FSDP instance should have its non-FSDP
        submodules run before its FSDP submodules to avoid the activation dtype
        being changed due to a different ``MixedPrecision`` configuration.

    Example::

        >>> # xdoctest: +SKIP("undefined variables")
        >>> model = nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3))
        >>> model[1] = FSDP(
        >>>     model[1],
        >>>     mixed_precision=MixedPrecision(param_dtype=torch.float16, cast_forward_inputs=True),
        >>> )
        >>> model = FSDP(
        >>>     model,
        >>>     mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, cast_forward_inputs=True),
        >>> )

    The above shows a working example. On the other hand, if ``model[1]``
    were replaced with ``model[0]``, meaning that the submodule using
    different ``MixedPrecision`` ran its forward first, then ``model[1]``
    would incorrectly see ``float16`` activations instead of ``bfloat16``
    ones.
    """

    param_dtype: Optional[torch.dtype] = None
    reduce_dtype: Optional[torch.dtype] = None
    buffer_dtype: Optional[torch.dtype] = None
    keep_low_precision_grads: bool = False
    cast_forward_inputs: bool = False
    cast_root_forward_inputs: bool = True
    _module_classes_to_ignore: Sequence[Type[torch.nn.Module]] = (_BatchNorm,)


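# Illustrative sketch (not part of the original module): a common mixed
# precision configuration that keeps forward/backward compute and buffers in
# ``bfloat16`` while forcing gradient reduction to run in full precision via
# ``reduce_dtype``. ``model`` is a hypothetical placeholder; the
# ``mixed_precision`` keyword name is assumed to match FSDP's constructor.
def _example_mixed_precision(model: torch.nn.Module) -> torch.nn.Module:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    mp = MixedPrecision(
        param_dtype=torch.bfloat16,   # compute dtype for forward/backward
        reduce_dtype=torch.float32,   # reduce-scatter/all-reduce in full precision
        buffer_dtype=torch.bfloat16,  # buffers are cast once in the first forward
    )
    return FSDP(model, mixed_precision=mp)

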
@dataclass
class CPUOffload:
    """
    This configures CPU offloading.

    Attributes:
        offload_params (bool): This specifies whether to offload parameters to
            CPU when not involved in computation. If ``True``, then this
            offloads gradients to CPU as well, meaning that the optimizer step
            runs on CPU.
    """

    offload_params: bool = False


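# Illustrative sketch (not part of the original module): enabling parameter
# (and hence gradient) CPU offloading, which trades GPU memory for a CPU-side
# optimizer step. ``model`` is a hypothetical placeholder; the ``cpu_offload``
# keyword name is assumed to match FSDP's constructor.
def _example_cpu_offload(model: torch.nn.Module) -> torch.nn.Module:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    return FSDP(model, cpu_offload=CPUOffload(offload_params=True))

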
class StateDictType(Enum):
    """
    This enum indicates which type of ``state_dict`` the FSDP module is
    currently processing (returning or loading).
    The default value is ``FULL_STATE_DICT`` to comply with the PyTorch
    convention.

    .. note::
        FSDP currently supports three types of ``state_dict``:

        1. ``state_dict``/``load_state_dict``: this pair of APIs returns and
           loads the non-sharded, unflattened parameters. The semantics are
           the same as using DDP.

        2. ``_local_state_dict``/``_load_local_state_dict``: this pair of APIs
           returns and loads local sharded, flattened parameters. The values
           returned by ``_local_state_dict`` can be directly used by FSDP and
           are only meaningful to FSDP (because parameters are flattened).
           Note that these APIs are meant for use via the
           :func:`state_dict_type` context manager as follows:

           >>> # xdoctest: +SKIP("undefined variables")
           >>> with fsdp.state_dict_type(StateDictType.LOCAL_STATE_DICT):
           ...     state = fsdp.state_dict()  # gets the local state dict

        3. ``_sharded_state_dict``/``_load_sharded_state_dict``: this pair of
           APIs returns and loads sharded, unflattened parameters. The
           ``state_dict`` returned by ``_sharded_state_dict`` can be used by
           all other parallel schemes (resharding may be required).
    """

    FULL_STATE_DICT = auto()
    LOCAL_STATE_DICT = auto()
    SHARDED_STATE_DICT = auto()


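# Illustrative sketch (not part of the original module): besides the
# ``state_dict_type`` context manager shown in the docstring above, the state
# dict type can also be set persistently. ``fsdp_model`` is a placeholder for
# an already-wrapped module, and ``FSDP.set_state_dict_type`` is assumed to be
# available on ``FullyShardedDataParallel``.
def _example_set_state_dict_type(fsdp_model: torch.nn.Module) -> dict:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    # Subsequent state_dict()/load_state_dict() calls on `fsdp_model` use the
    # sharded flavor until the type is changed again.
    FSDP.set_state_dict_type(fsdp_model, StateDictType.SHARDED_STATE_DICT)
    return fsdp_model.state_dict()

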
@dataclass
class StateDictConfig:
    """
    ``StateDictConfig`` is the base class for all ``state_dict`` configuration
    classes. Users should instantiate a child class (e.g.
    ``FullStateDictConfig``) in order to configure settings for the
    corresponding ``state_dict`` type supported by FSDP.

    Attributes:
        offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict
            values to CPU, and if ``False``, then FSDP keeps them on GPU.
            (Default: ``False``)
    """

    offload_to_cpu: bool = False


@dataclass
class FullStateDictConfig(StateDictConfig):
    """
    ``FullStateDictConfig`` is a config class meant to be used with
    ``StateDictType.FULL_STATE_DICT``. We recommend enabling both
    ``offload_to_cpu=True`` and ``rank0_only=True`` when saving full state
    dicts to save GPU memory and CPU memory, respectively. This config class
    is meant to be used via the :func:`state_dict_type` context manager as
    follows:

        >>> # xdoctest: +SKIP("undefined variables")
        >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
        >>> fsdp = FSDP(model, auto_wrap_policy=...)
        >>> cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
        >>> with FSDP.state_dict_type(fsdp, StateDictType.FULL_STATE_DICT, cfg):
        >>>     state = fsdp.state_dict()
        >>>     # `state` will be empty on non rank 0 and contain CPU tensors on rank 0.
        >>> # To reload checkpoint for inference, finetuning, transfer learning, etc:
        >>> model = model_fn()  # Initialize model in preparation for wrapping with FSDP
        >>> if dist.get_rank() == 0:
        >>>     # Load checkpoint only on rank 0 to avoid memory redundancy
        >>>     state_dict = torch.load("my_checkpoint.pt")
        >>>     model.load_state_dict(state_dict)
        >>> # All ranks initialize FSDP module as usual. `sync_module_states` argument
        >>> # communicates loaded checkpoint states from rank 0 to rest of the world.
        >>> fsdp = FSDP(model, device_id=torch.cuda.current_device(), auto_wrap_policy=..., sync_module_states=True)
        >>> # After this point, all ranks have FSDP model with loaded checkpoint.

    Attributes:
        rank0_only (bool): If ``True``, then only rank 0 saves the full state
            dict, and nonzero ranks save an empty dict. If ``False``, then all
            ranks save the full state dict. (Default: ``False``)
    """

    rank0_only: bool = False


@dataclass
class LocalStateDictConfig(StateDictConfig):
    pass


@dataclass
class ShardedStateDictConfig(StateDictConfig):
    """
    ``ShardedStateDictConfig`` is a config class meant to be used with
    ``StateDictType.SHARDED_STATE_DICT``.

    Attributes:
        _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
            as ``DTensor``, and if ``False``, then FSDP saves them as
            ``ShardedTensor``. (Default: ``False``)

    .. warning:: ``_use_dtensor`` is a private field of
        :class:`ShardedStateDictConfig` and it is used by FSDP to determine the
        type of state dict values. Users should not manually modify
        ``_use_dtensor``.
    """

    _use_dtensor: bool = False


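# Illustrative sketch (not part of the original module): saving a sharded
# model state dict with its values offloaded to CPU, using the
# :func:`state_dict_type` context manager referenced above. ``fsdp_model`` is
# a placeholder for an already-wrapped module.
def _example_sharded_state_dict(fsdp_model: torch.nn.Module) -> dict:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    cfg = ShardedStateDictConfig(offload_to_cpu=True)
    with FSDP.state_dict_type(fsdp_model, StateDictType.SHARDED_STATE_DICT, cfg):
        # Each rank gets its own shard of every parameter, kept unflattened.
        return fsdp_model.state_dict()

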
@dataclass
class OptimStateDictConfig:
    """
    ``OptimStateDictConfig`` is the base class for all ``optim_state_dict``
    configuration classes. Users should instantiate a child class (e.g.
    ``FullOptimStateDictConfig``) in order to configure settings for the
    corresponding ``optim_state_dict`` type supported by FSDP.

    Attributes:
        offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict's
            tensor values to CPU, and if ``False``, then FSDP keeps them on the
            original device (which is GPU unless parameter CPU offloading is
            enabled). (Default: ``True``)
    """

    offload_to_cpu: bool = True


@dataclass
class FullOptimStateDictConfig(OptimStateDictConfig):
    """
    Attributes:
        rank0_only (bool): If ``True``, then only rank 0 saves the full state
            dict, and nonzero ranks save an empty dict. If ``False``, then all
            ranks save the full state dict. (Default: ``False``)
    """

    rank0_only: bool = False


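# Illustrative sketch (not part of the original module): gathering a full
# optimizer state dict on rank 0 only, offloaded to CPU, alongside a full
# model state dict config. ``fsdp_model`` and ``optim`` are placeholders, and
# ``FSDP.optim_state_dict`` is assumed to be available on
# ``FullyShardedDataParallel``.
def _example_full_optim_state_dict(
    fsdp_model: torch.nn.Module, optim: torch.optim.Optimizer
) -> dict:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    with FSDP.state_dict_type(
        fsdp_model,
        StateDictType.FULL_STATE_DICT,
        FullStateDictConfig(offload_to_cpu=True, rank0_only=True),
        FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True),
    ):
        # Nonzero ranks receive an empty dict because rank0_only=True.
        return FSDP.optim_state_dict(fsdp_model, optim)

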
@dataclass
class LocalOptimStateDictConfig(OptimStateDictConfig):
    offload_to_cpu: bool = False


@dataclass
class ShardedOptimStateDictConfig(OptimStateDictConfig):
    """
    ``ShardedOptimStateDictConfig`` is a config class meant to be used with
    ``StateDictType.SHARDED_STATE_DICT``.

    Attributes:
        _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
            as ``DTensor``, and if ``False``, then FSDP saves them as
            ``ShardedTensor``. (Default: ``False``)

    .. warning:: ``_use_dtensor`` is a private field of
        :class:`ShardedOptimStateDictConfig` and it is used by FSDP to
        determine the type of state dict values. Users should not manually
        modify ``_use_dtensor``.
    """

    _use_dtensor: bool = False


@dataclass
class StateDictSettings:
    state_dict_type: StateDictType
    state_dict_config: StateDictConfig
    optim_state_dict_config: OptimStateDictConfig


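# Illustrative sketch (not part of the original module): ``StateDictSettings``
# bundles the currently configured state dict type and configs for a wrapped
# module. ``fsdp_model`` is a placeholder, and ``FSDP.get_state_dict_type`` is
# assumed to be available and to return a ``StateDictSettings`` instance.
def _example_inspect_state_dict_settings(fsdp_model: torch.nn.Module) -> StateDictSettings:
    # Imported lazily to avoid a circular import with the FSDP package.
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    settings = FSDP.get_state_dict_type(fsdp_model)
    assert isinstance(settings, StateDictSettings)
    return settings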