# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration with Deepspeed
"""

import copy
import importlib.metadata as importlib_metadata
import importlib.util
import weakref
from functools import partialmethod

from ..dependency_versions_check import dep_version_check
from ..utils import is_accelerate_available, is_torch_available, is_torch_mlu_available, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


def is_deepspeed_available():
    package_exists = importlib.util.find_spec("deepspeed") is not None

    # Check that we're not picking up a stray "deepspeed" directory somewhere but the actually installed library,
    # by trying to grab its distribution metadata (which only exists for an installed package).
    if package_exists:
        try:
            if is_torch_mlu_available():
                _ = importlib_metadata.metadata("deepspeed-mlu")
                return True
            _ = importlib_metadata.metadata("deepspeed")
            return True
        except importlib_metadata.PackageNotFoundError:
            return False
    return False


if is_accelerate_available() and is_deepspeed_available():
    from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig
else:
    # Inherit from a dummy `object` if accelerate is not available, so that Python can still import this file.
    # The DeepSpeed glue code will never actually inherit from this dummy class, as it first checks that
    # accelerate is available.
    from builtins import object as DeepSpeedConfig


class HfDeepSpeedConfig(DeepSpeedConfig):
    """
    This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.

    A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
    the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore it's
    important that this object remains alive while the program is still running.

    [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the
    configuration with the values of [`TrainingArguments`] by replacing the special placeholder value `"auto"`.
    Without this special logic the DeepSpeed configuration is not modified in any way.

    Args:
        config_file_or_dict (`Union[str, Dict]`): path to a DeepSpeed config file or a dict.
    """

    def __init__(self, config_file_or_dict):
        # set global weakref object
        set_hf_deepspeed_config(self)
        dep_version_check("accelerate")
        dep_version_check("deepspeed")
        super().__init__(config_file_or_dict)
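

# Illustrative usage sketch (editor's addition, not part of the original module): the weakref pattern described in
# the docstring above is what lets code such as `from_pretrained` detect an active ZeRO-3 config. The helper below
# is a minimal sketch of the typical non-Trainer usage; the config values and the "gpt2" checkpoint are placeholders.
def _example_zero3_from_pretrained():
    from transformers import AutoModel

    ds_config = {
        "train_micro_batch_size_per_gpu": 1,
        "zero_optimization": {"stage": 3},
    }
    # Must be created *before* from_pretrained and kept referenced, so the module-level weakref stays alive.
    dschf = HfDeepSpeedConfig(ds_config)
    model = AutoModel.from_pretrained("gpt2")  # weights are loaded under the active ZeRO-3 config
    return dschf, model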


class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
    """
    The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has
    the same lifespan as the latter.
    """

    def __init__(self, config_file_or_dict):
        super().__init__(config_file_or_dict)
        self._dtype = None
        self.mismatches = []

    def dtype(self):
        if self._dtype is None:
            raise ValueError("trainer_config_process() wasn't called yet to determine the dtype")
        return self._dtype

    def is_auto(self, ds_key_long):
        val = self.get_value(ds_key_long)
        if val is None:
            return False
        else:
            return val == "auto"

    def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
        """
        A utility method that massages the config file and can optionally verify that the values match.

        1. Replace "auto" values with the `TrainingArguments` value.
        2. If it wasn't "auto" and `must_match` is true, then check that the DeepSpeed config value matches the
           Trainer config value and, if mismatched, add the entry to `self.mismatches` - `trainer_config_finalize`
           will raise if there are one or more mismatches. (See the illustrative sketch after this class.)
        """
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return

        if config.get(ds_key) == "auto":
            config[ds_key] = hf_val
            return

        if not must_match:
            return

        ds_val = config.get(ds_key)
        if ds_val is not None and ds_val != hf_val:
            self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")

    fill_only = partialmethod(fill_match, must_match=False)

    def trainer_config_process(self, args, auto_find_batch_size=False):
        """
        Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
        creation.
        """
        # DeepSpeed does:
        #   train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps
        train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps
        self.fill_match(
            "train_micro_batch_size_per_gpu",
            args.per_device_train_batch_size,
            "per_device_train_batch_size",
            not auto_find_batch_size,
        )
        self.fill_match(
            "gradient_accumulation_steps",
            args.gradient_accumulation_steps,
            "gradient_accumulation_steps",
        )
        self.fill_match(
            "train_batch_size",
            train_batch_size,
            "train_batch_size (calculated)",
            not auto_find_batch_size,
        )
        self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm")

        self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate")
        self.fill_match(
            "optimizer.params.betas",
            [args.adam_beta1, args.adam_beta2],
            "adam_beta1+adam_beta2",
        )
        self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon")
        self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay")

        self.fill_only("scheduler.params.warmup_min_lr", 0)  # not a trainer arg
        self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate")
        # total_num_steps - will get set in trainer_config_finalize

        # fp16
        if args.fp16 or args.fp16_full_eval:
            fp16_backend = "apex" if args.fp16_backend == "apex" else "amp"
        else:
            fp16_backend = None

        if args.save_on_each_node:
            # deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True
            self.config["checkpoint"] = self.config.get("checkpoint", {})
            self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node

        # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set
        # any here unless the user did the work
        self.fill_match(
            "fp16.enabled",
            ((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"),
            "fp16|fp16_full_eval+fp16_backend(amp)",
        )

        # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any
        # ZeRO features
        self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)")
        self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level")

        self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval")

        # deepspeed's default mode is fp16 unless there is a config that says differently
        if self.is_true("bf16.enabled"):
            self._dtype = torch.bfloat16
        elif self.is_false("fp16.enabled"):
            self._dtype = torch.float32
        else:
            self._dtype = torch.float16

    def trainer_config_finalize(self, args, model, num_training_steps):
        """
        This stage is run after we have the model and know num_training_steps.

        Now we can complete the configuration process.
        """
        # zero

        # deal with config keys that use `auto` value and rely on model's hidden_size
        hidden_size_based_keys = [
            "zero_optimization.reduce_bucket_size",
            "zero_optimization.stage3_prefetch_bucket_size",
            "zero_optimization.stage3_param_persistence_threshold",
        ]
        hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]

        if len(hidden_size_auto_keys) > 0:
            if hasattr(model.config, "hidden_size"):
                hidden_size = model.config.hidden_size
            elif hasattr(model.config, "hidden_sizes"):
                # if there are many hidden sizes pick the largest one
                hidden_size = max(model.config.hidden_sizes)
            elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_size"):
                hidden_size = model.config.text_config.hidden_size
            elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_sizes"):
                # if there are many hidden sizes pick the largest one
                hidden_size = max(model.config.text_config.hidden_sizes)
            else:
                raise ValueError(
                    "The model's config file has neither `hidden_size` nor `hidden_sizes` entry, "
                    "therefore it's not possible to automatically fill out the following `auto` entries "
                    f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
                    "`auto` values for these keys with an integer value of your choice."
                )

            self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size)
            if self.is_zero3():
                # automatically assign the optimal config values based on model config
                self.fill_only(
                    "zero_optimization.stage3_prefetch_bucket_size",
                    int(0.9 * hidden_size * hidden_size),
                )
                self.fill_only(
                    "zero_optimization.stage3_param_persistence_threshold",
                    10 * hidden_size,
                )

        # scheduler
        self.fill_match(
            "scheduler.params.total_num_steps",
            num_training_steps,
            "num_training_steps (calculated)",
        )
        self.fill_match(
            "scheduler.params.warmup_num_steps",
            args.get_warmup_steps(num_training_steps),
            "warmup_steps",
        )

        if len(self.mismatches) > 0:
            mismatches = "\n".join(self.mismatches)
            raise ValueError(
                "Please correct the following DeepSpeed config values that mismatch TrainingArguments"
                f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
            )
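

# Illustrative sketch (editor's addition): how the two-stage "auto" replacement above plays out. Entries that depend
# only on `TrainingArguments` are filled in `trainer_config_process`; the model- and step-dependent ones are filled
# later in `trainer_config_finalize`. The output directory and all config values below are placeholders.
def _example_auto_value_filling():
    from transformers import TrainingArguments

    ds_config = {
        "train_micro_batch_size_per_gpu": "auto",
        "gradient_accumulation_steps": "auto",
        "train_batch_size": "auto",
        "gradient_clipping": "auto",
        "optimizer": {
            "type": "AdamW",
            "params": {"lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto"},
        },
        "zero_optimization": {"stage": 2},
    }
    hf_ds_config = HfTrainerDeepSpeedConfig(ds_config)
    args = TrainingArguments(output_dir="tmp_output", per_device_train_batch_size=8, learning_rate=3e-5)
    hf_ds_config.trainer_config_process(args)  # "auto" entries now mirror the TrainingArguments values
    return hf_ds_config.config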


# keep the config object global to be able to access it anywhere during TrainingArguments life-cycle
_hf_deepspeed_config_weak_ref = None


def set_hf_deepspeed_config(hf_deepspeed_config_obj):
    # this is a special weakref global object to allow us to get to Deepspeed config from APIs
    # that don't have an easy way to get to the Deepspeed config outside of the Trainer domain.
    global _hf_deepspeed_config_weak_ref
    # will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed)
    _hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj)


def unset_hf_deepspeed_config():
    # useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method
    global _hf_deepspeed_config_weak_ref
    _hf_deepspeed_config_weak_ref = None


def is_deepspeed_zero3_enabled():
    if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
        return _hf_deepspeed_config_weak_ref().is_zero3()
    else:
        return False


def deepspeed_config():
    if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
        return _hf_deepspeed_config_weak_ref().config
    else:
        return None
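

# Illustrative sketch (editor's addition): how code outside the Trainer can consult the weakref-backed helpers
# above, e.g. to branch on ZeRO-3 while loading or resizing a model. The function name is hypothetical.
def _example_query_global_deepspeed_state():
    if is_deepspeed_zero3_enabled():
        # the full DeepSpeed config dict is available too, e.g. to inspect stage-3/offload settings
        config = deepspeed_config()
        return config.get("zero_optimization", {})
    return None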


def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters):
    """
    A convenience wrapper that deals with optimizer and lr scheduler configuration.
    """
    from accelerate.utils import DummyOptim, DummyScheduler

    config = hf_deepspeed_config.config

    # Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's:
    # 1. DS scheduler + DS optimizer: Yes
    # 2. HF scheduler + HF optimizer: Mostly*
    # 3. DS scheduler + HF optimizer: Mostly*
    # 4. HF scheduler + DS optimizer: Yes
    #
    # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB)

    optimizer = None
    if "optimizer" in config:
        if args.adafactor:
            raise ValueError(
                "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. "
                "Only one optimizer can be configured."
            )
        optimizer = DummyOptim(params=model_parameters)
    else:
        if hf_deepspeed_config.is_offload():
            logger.info(
                "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the"
                " custom optimizer has both CPU and GPU implementation (except LAMB)"
            )

        # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch.
        # But trainer uses AdamW by default.
        optimizer = trainer.create_optimizer()
        # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer`
        config["zero_allow_untested_optimizer"] = True

    lr_scheduler = None
    if "scheduler" in config:
        lr_scheduler = DummyScheduler(optimizer)
    else:
        if isinstance(optimizer, DummyOptim):

            def _lr_scheduler_callable(optimizer):
                # create a shallow copy first, so later modifications do not affect original trainer
                trainer_copy = copy.copy(trainer)
                # at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set
                # update it to None so that we can re-create a new scheduler
                trainer_copy.lr_scheduler = None
                lr_scheduler = trainer_copy.create_scheduler(
                    num_training_steps=num_training_steps, optimizer=optimizer
                )
                return lr_scheduler

            lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable)
        else:
            lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)

    return optimizer, lr_scheduler
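

# Illustrative sketch (editor's addition): the two common config shapes that `deepspeed_optim_sched` handles.
# With "optimizer"/"scheduler" sections present, DeepSpeed owns both and the Trainer only receives Dummy objects;
# without them, the Trainer's own `create_optimizer`/`create_scheduler` are used. All values are placeholders.
def _example_optim_sched_config_shapes():
    ds_native = {
        "optimizer": {"type": "AdamW", "params": {"lr": "auto", "weight_decay": "auto"}},
        "scheduler": {"type": "WarmupLR", "params": {"warmup_num_steps": "auto"}},
    }
    hf_side = {
        # no "optimizer"/"scheduler" keys: the Trainer creates them; `deepspeed_optim_sched` then sets
        # `zero_allow_untested_optimizer` so DeepSpeed accepts the HF-created optimizer
        "zero_optimization": {"stage": 2},
    }
    return ds_native, hf_side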


def deepspeed_init(trainer, num_training_steps, inference=False):
    """
    Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args.

    If `resume_from_checkpoint` was passed to the Trainer then an attempt to resume from a previously saved
    checkpoint will be made.

    Args:
        trainer: Trainer object
        num_training_steps: per single gpu
        inference: launch in inference mode (no optimizer and no lr scheduler)

    Returns: optimizer, lr_scheduler

    We may use `deepspeed_init` more than once during the life of the Trainer; when we do, it's a temporary hack
    based on https://github.com/microsoft/DeepSpeed/issues/1394#issuecomment-937405374 until DeepSpeed fixes a bug
    where it can't resume from a checkpoint after it did some stepping:
    https://github.com/microsoft/DeepSpeed/issues/1612
    """
    from deepspeed.utils import logger as ds_logger

    model = trainer.model
    args = trainer.args

    hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config

    # resume config update - some bits like `model` and `num_training_steps` only become available during train
    hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)

    # set the Deepspeed log level consistent with the Trainer
    ds_logger.setLevel(args.get_process_log_level())

    if inference:
        # only Z3 makes sense for the inference
        if not hf_deepspeed_config.is_zero3():
            raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")

        # in case the training config is re-used for inference
        hf_deepspeed_config.del_config_sub_tree("optimizer")
        hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
        optimizer, lr_scheduler = None, None
        model_parameters = None
    else:
        trainer.optimizer = None  # important for when deepspeed_init is used as re-init
        model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
        optimizer, lr_scheduler = deepspeed_optim_sched(
            trainer, hf_deepspeed_config, args, num_training_steps, model_parameters
        )

    # keep for quick debug:
    # from pprint import pprint; pprint(config)

    return optimizer, lr_scheduler


def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True):
    # it's possible that the user is trying to resume from model_path, which doesn't necessarily
    # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
    # a resume from a checkpoint and not just a local pretrained weight. So we check here if the
    # path contains what looks like a deepspeed checkpoint
    import glob

    deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*"))

    if len(deepspeed_checkpoint_dirs) > 0:
        logger.info(f"Attempting to resume from {checkpoint_path}")
        # this magically updates self.optimizer and self.lr_scheduler
        load_path, _ = deepspeed_engine.load_checkpoint(
            checkpoint_path,
            load_module_strict=load_module_strict,
            load_optimizer_states=True,
            load_lr_scheduler_states=True,
        )
        if load_path is None:
            raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}")
    else:
        raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")
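

# Illustrative sketch (editor's addition): what `deepspeed_load_checkpoint` expects on disk - a checkpoint
# directory containing at least one DeepSpeed `global_step*` sub-directory with the engine's saved states.
# The paths below are placeholders.
def _example_resume_layout_check(checkpoint_path="output/checkpoint-500"):
    import glob
    import os

    # e.g. output/checkpoint-500/global_step500/ holds the optimizer/model states written by DeepSpeed
    return len(glob.glob(os.path.join(checkpoint_path, "global_step*"))) > 0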