optimizer.py 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348
  1. # Copyright (c) Meta Platforms, Inc. and affiliates
  2. import dataclasses
  3. from typing import cast, Dict, List, Optional, Sequence, Tuple, Union
  4. import torch
  5. import torch.distributed as dist
  6. from torch._utils import _get_device_module
  7. from torch.distributed._shard.sharded_tensor.api import ShardedTensor
  8. from torch.distributed._shard.sharded_tensor.metadata import (
  9. TensorProperties as ShardTensorProperties,
  10. )
  11. from torch.distributed._shard.sharded_tensor.shard import Shard
  12. from torch.distributed._shard.sharding_spec.chunk_sharding_spec import ChunkShardingSpec
  13. from torch.distributed._tensor import DTensor
  14. from torch.distributed.checkpoint._nested_dict import unflatten_state_dict
  15. from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner
  16. from torch.distributed.checkpoint.metadata import (
  17. BytesStorageMetadata,
  18. ChunkStorageMetadata,
  19. Metadata,
  20. MetadataIndex,
  21. STATE_DICT_TYPE,
  22. TensorProperties,
  23. TensorStorageMetadata,
  24. )
  25. from torch.distributed.checkpoint.planner import LoadPlan, LoadPlanner
  26. from torch.distributed.checkpoint.planner_helpers import (
  27. _create_read_items,
  28. create_read_items_for_chunk_list,
  29. )
  30. from torch.distributed.checkpoint.state_dict_loader import load_state_dict
  31. from torch.distributed.checkpoint.storage import StorageReader
  32. from torch.distributed.checkpoint.utils import (
  33. _element_wise_add,
  34. _element_wise_sub,
  35. _normalize_device_info,
  36. )
  37. from torch.distributed.distributed_c10d import _get_default_group
  38. from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor
  39. from torch.distributed.remote_device import _remote_device
  40. STATE_DICT_2D_LAYOUT = Dict[str, Tuple[Optional[Sequence[int]], Sequence[int]]]
  41. # TODO: Update docstrings for optimizer.py
  42. __all__ = [
  43. "load_sharded_optimizer_state_dict",
  44. ]
  45. def _gen_rank_device(global_rank: int, device_type: str = "cuda") -> str:
  46. if device_type == "cpu":
  47. return "cpu"
  48. device_module = _get_device_module(device_type)
  49. if device_module.is_available():
  50. return _normalize_device_info(
  51. device_type, global_rank % device_module.device_count()
  52. )
  53. return "cpu"
  54. def _create_colwise_spec(
  55. pg: Optional[dist.ProcessGroup] = None,
  56. ) -> ChunkShardingSpec:
  57. pg_device_type = dist.distributed_c10d._get_pg_default_device(pg).type
  58. if pg is None:
  59. placements = [
  60. f"rank:{idx}/{_gen_rank_device(idx, pg_device_type)}"
  61. for idx in range(dist.get_world_size())
  62. ]
  63. else:
  64. placements = [
  65. f"rank:{idx}/{_gen_rank_device(dist.get_global_rank(pg, idx), pg_device_type)}"
  66. for idx in range(pg.size())
  67. ]
  68. return ChunkShardingSpec(
  69. dim=0,
  70. placements=cast(List[Union[_remote_device, str]], placements),
  71. )
  72. def _is_nested_tensor(val: torch.Tensor) -> bool:
  73. if type(val) is ShardedTensor:
  74. if len(val.local_shards()) == 0:
  75. return False
  76. if type(val.local_shards()[0].tensor) is ShardedTensor:
  77. return True
  78. if type(val.local_shards()[0].tensor) is DTensor:
  79. raise ValueError("Cannot handle DTensor nested insided ShardedTensor")
  80. elif type(val) is DTensor and (
  81. type(val._local_tensor) is DTensor or type(val._local_tensor) is ShardedTensor
  82. ):
  83. raise ValueError("Cannot handle nested DTensor")
  84. return False
  85. def _alloc_tensor(
  86. props: TensorProperties, size: Sequence[int], device_type: str = "cuda"
  87. ) -> torch.Tensor:
  88. return torch.empty(
  89. size=size,
  90. dtype=props.dtype,
  91. layout=props.layout,
  92. requires_grad=props.requires_grad,
  93. pin_memory=props.pin_memory,
  94. device=cast(torch.device, _get_device_module(device_type).current_device()),
  95. )
  96. def _get_state_dict_2d_layout(
  97. state_dict: STATE_DICT_TYPE,
  98. ) -> Tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]:
  99. """
  100. Load the right TP slice of the optimizer state.
  101. This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata.
  102. We take advantage of the model state_dict producing a sliced ST to figure out what we need to load.
  103. This is pretty fragile and it might be easier for FSDP to compute this info for us.
  104. Returns a dictionary where keys are the same of the state_dict and the value is a tuple of
  105. (offset, size) for the current rank TP slice.
  106. N.B. The state_dict *MUST* come from FSDP.sharded_state_dict.
  107. """
  108. specs: STATE_DICT_2D_LAYOUT = {}
  109. dp_pg: Optional[dist.ProcessGroup] = None
  110. for key, value in state_dict.items():
  111. specs[key] = (None, value.size())
  112. if _is_nested_tensor(value):
  113. assert (
  114. len(value.local_shards()) == 1
  115. ), "Cannot handle ST with multiple shards"
  116. assert isinstance(
  117. value, ShardedTensor
  118. ), "Can only handle nested ShardedTensor"
  119. shard = value.local_shards()[0]
  120. specs[key] = (
  121. shard.metadata.shard_offsets,
  122. shard.metadata.shard_sizes,
  123. )
  124. dp_pg = shard.tensor._process_group # type: ignore[attr-defined]
  125. return (
  126. specs,
  127. dp_pg,
  128. )
  129. class _ReaderWithOffset(DefaultLoadPlanner):
  130. translation: Dict[MetadataIndex, MetadataIndex]
  131. state_dict: STATE_DICT_TYPE
  132. metadata: Metadata
  133. def __init__(self, fqn_to_offset: Dict[str, Sequence[int]]) -> None:
  134. super().__init__()
  135. self.fqn_to_offset = fqn_to_offset
  136. self.metadata = Metadata({})
  137. self.state_dict = {}
  138. self.translation = {}
  139. def create_local_plan(self) -> LoadPlan:
  140. requests = []
  141. self.translation = {}
  142. for fqn, obj in self.state_dict.items():
  143. md = self.metadata.state_dict_metadata[fqn]
  144. if not isinstance(obj, ShardedTensor):
  145. requests += _create_read_items(fqn, md, obj)
  146. continue
  147. if fqn not in self.fqn_to_offset:
  148. requests += _create_read_items(fqn, md, obj)
  149. continue
  150. offset = self.fqn_to_offset[fqn]
  151. assert len(obj.local_shards()) == 1
  152. original_shard = obj.local_shards()[0]
  153. local_chunks = [
  154. ChunkStorageMetadata(
  155. offsets=torch.Size(
  156. _element_wise_add(original_shard.metadata.shard_offsets, offset)
  157. ),
  158. sizes=torch.Size(original_shard.metadata.shard_sizes),
  159. )
  160. ]
  161. reqs = create_read_items_for_chunk_list(
  162. fqn, cast(TensorStorageMetadata, md), local_chunks
  163. )
  164. # TODO: The ReadItems will have a displaced MetadataIndex, fix it.
  165. # TODO: we should change _create_sharded_read_items to have more ergonomic API
  166. for ri in reqs:
  167. assert ri.dest_index.offset is not None
  168. original_offset = _element_wise_sub(ri.dest_index.offset, offset)
  169. original_index = dataclasses.replace(
  170. ri.dest_index, offset=torch.Size(original_offset)
  171. )
  172. self.translation[ri.dest_index] = original_index
  173. requests += reqs
  174. return LoadPlan(requests)
  175. def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:
  176. return super().lookup_tensor(self.translation.get(index, index))
def load_sharded_optimizer_state_dict(
    model_state_dict: STATE_DICT_TYPE,
    optimizer_key: str,
    storage_reader: StorageReader,
    planner: Optional[LoadPlanner] = None,
) -> STATE_DICT_TYPE:
    """
    Load a state_dict in conjunction with FSDP sharded optimizer state.

    This is the current recommended way to checkpoint FSDP.

    Args:
        model_state_dict: the model's sharded state_dict (MUST come from
            ``FSDP.sharded_state_dict``) — used to infer the per-key layout.
        optimizer_key: top-level key under which the optimizer state was saved.
        storage_reader: reader used to fetch checkpoint metadata and data.
        planner: optional load planner; only used when no data-parallel
            process group is detected (otherwise ``_ReaderWithOffset`` is used).

    Returns:
        The unflattened state_dict containing the loaded optimizer state.

    >>> # xdoctest: +SKIP
    >>> import torch.distributed.checkpoint as dist_cp
    >>> # Save
    >>> model: torch.nn.Model
    >>> optim_params = model.parameters()
    >>> optim = torch.optim.SGD(optim_params, lr=0.01)
    >>> # Save
    >>> with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
    >>>     state_dict = {
    >>>         "optimizer": FSDP.optim_state_dict(model, optim),
    >>>         "model": model.state_dict()
    >>>     }
    >>>     dist_cp.save_state_dict(
    >>>         state_dict=state_dict,
    >>>         storage_writer=dist_cp.FileSystemWriter("checkpoint"),
    >>>         planner=dist_cp.DefaultSavePlanner(),
    >>>     )
    >>>
    >>> # Load
    >>> with FSDP.state_dict_type(model_tp, StateDictType.SHARDED_STATE_DICT):
    >>>     model_state_dict = model_tp.state_dict()
    >>>     checkpoint = {
    >>>         "model": model_state_dict
    >>>     }
    >>>     dist_cp.load_state_dict(
    >>>         state_dict=checkpoint,
    >>>         storage_reader=dist_cp.FileSystemReader(checkpoint_file),
    >>>         planner=dist_cp.DefaultLoadPlanner(),
    >>>     )
    >>>     model.load_state_dict(checkpoint["model"])
    >>>
    >>>     optim_state = dist_cp.load_sharded_optimizer_state_dict(
    >>>         model_state_dict,
    >>>         optimizer_key="optimizer",
    >>>         storage_reader=dist_cp.FileSystemReader("checkpoint"),
    >>>     )
    >>>
    >>>     flattened_osd = FSDP.optim_state_dict_to_load(
    >>>         model, optim, optim_state["optimizer"]
    >>>     )
    >>>
    >>>     optim.load_state_dict(flattened_osd)
    """
    metadata = storage_reader.read_metadata()

    # Derive each model key's TP slice (offset, size) on this rank, plus the
    # data-parallel process group (None when the state_dict is not 2D-nested).
    layout_specs, dp_pg = _get_state_dict_2d_layout(model_state_dict)
    dp_pg_device_type = dist.distributed_c10d._get_pg_default_device(dp_pg).type
    device_module = _get_device_module(dp_pg_device_type)

    if dp_pg is None:
        # Not 2D: shard optimizer state column-wise across the whole world,
        # mapping each rank onto a local device by modulo over device count.
        placements = []
        for i in range(dist.get_world_size()):
            device_info = _normalize_device_info(
                dp_pg_device_type, i % device_module.device_count()
            )
            placements.append(f"rank:{i}/{device_info}")
        sharding_spec = ChunkShardingSpec(dim=0, placements=placements)  # type: ignore[arg-type]
    else:
        sharding_spec = _create_colwise_spec(dp_pg)

    # Create a state_dict for optimizer state
    state_dict: STATE_DICT_TYPE = {}

    # Keys whose reads must be displaced by a TP-slice offset (consumed by
    # _ReaderWithOffset below).
    fqn_to_offset: Dict[str, Sequence[int]] = {}
    for key, value in metadata.state_dict_metadata.items():
        key_path = metadata.planner_data[key]
        # Only process entries saved under the requested optimizer key.
        if key_path[0] != optimizer_key:
            continue

        if isinstance(value, BytesStorageMetadata):
            # Placeholder; load_state_dict below replaces it with real bytes.
            state_dict[key] = "<bytes_io>"
            continue

        # value: TensorStorageMetadata
        if value.size.numel() == 1:
            # Scalar state (e.g. "step") is allocated whole, not sharded.
            state_dict[key] = _alloc_tensor(
                value.properties, value.size, dp_pg_device_type
            )
        elif dp_pg is None:
            # Plain FSDP: chunk the freshly allocated tensor over the default group.
            state_dict[key] = _create_chunk_sharded_tensor(
                _alloc_tensor(value.properties, value.size, dp_pg_device_type),
                rank=dist.get_rank(),
                world_size=dist.get_world_size(),
                num_devices_per_node=device_module.device_count(),
                pg=_get_default_group(),
            )
        else:
            # 2D case: allocate a ShardedTensor shaped like this rank's TP
            # slice of the corresponding model parameter.
            spec_key = key_path[2]
            alloc_size = layout_specs.get(spec_key, (None, value.size))[1]

            properties = ShardTensorProperties(
                dtype=value.properties.dtype,
                layout=value.properties.layout,
                requires_grad=value.properties.requires_grad,
                memory_format=value.properties.memory_format,
                pin_memory=value.properties.pin_memory,
            )

            st_md = sharding_spec.build_metadata(torch.Size(alloc_size), properties)
            local_shards = []
            current_rank = dist.get_rank(dp_pg)
            for shard_md in st_md.shards_metadata:
                # Materialize only the shards placed on this rank.
                if cast(_remote_device, shard_md.placement).rank() != current_rank:
                    continue
                local_shards.append(
                    Shard(
                        tensor=_alloc_tensor(
                            value.properties, shard_md.shard_sizes, dp_pg_device_type
                        ),
                        metadata=shard_md,
                    )
                )

            st = ShardedTensor._init_from_local_shards_and_global_metadata(
                local_shards, st_md, process_group=dp_pg
            )

            # Record the TP-slice offset so reads are displaced accordingly.
            if spec_key in layout_specs and layout_specs[spec_key][0] is not None:
                fqn_to_offset[key] = cast(Sequence[int], layout_specs[spec_key][0])

            state_dict[key] = st

    # Whether we unflatten before or after doesn't matter
    load_state_dict(
        state_dict=state_dict,
        storage_reader=storage_reader,
        # FIXME the type of planner is wrong in load_state_dict
        planner=_ReaderWithOffset(fqn_to_offset) if dp_pg is not None else planner,
    )

    state_dict = unflatten_state_dict(state_dict, metadata.planner_data)

    return state_dict