peft.py 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503
  1. # Copyright 2023 The HuggingFace Team. All rights reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
import importlib
import importlib.metadata
import inspect
import warnings
from typing import Any, Dict, List, Optional, Union

from packaging import version

from ..utils import (
    check_peft_version,
    find_adapter_config_file,
    is_accelerate_available,
    is_peft_available,
    is_torch_available,
    logging,
)
# torch and accelerate are soft dependencies: only import them when available so this
# module can still be imported in environments that do not ship them.
if is_torch_available():
    import torch

if is_accelerate_available():
    from accelerate import dispatch_model
    from accelerate.utils import get_balanced_memory, infer_auto_device_map

# Minimum PEFT version supported for the integration
MIN_PEFT_VERSION = "0.5.0"

logger = logging.get_logger(__name__)
class PeftAdapterMixin:
    """
    A class containing all functions for loading and using adapters weights that are supported in PEFT library. For
    more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT
    library: https://huggingface.co/docs/peft/index

    Currently supported PEFT methods are all non-prefix tuning methods. Below is the list of supported PEFT methods
    that anyone can load, train and run with this mixin class:
    - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora
    - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3
    - AdaLora: https://arxiv.org/abs/2303.10512

    Other PEFT models such as prompt tuning, prompt learning are out of scope as these adapters are not "injectable"
    into a torch module. For using these methods, please refer to the usage guide of PEFT library.

    With this mixin, if the correct PEFT version is installed, it is possible to:

    - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model
    - Attach new adapters in the model and train them with Trainer or by your own.
    - Attach multiple adapters and iteratively activate / deactivate them
    - Activate / deactivate all adapters from the model.
    - Get the `state_dict` of the active adapter.
    """

    # Flipped to True the first time an adapter is loaded or added on this model;
    # the other methods use it to know whether `self.peft_config` has been populated.
    _hf_peft_config_loaded = False
    def load_adapter(
        self,
        peft_model_id: Optional[str] = None,
        adapter_name: Optional[str] = None,
        revision: Optional[str] = None,
        token: Optional[str] = None,
        device_map: Optional[str] = "auto",
        max_memory: Optional[Dict[str, Union[int, str]]] = None,
        offload_folder: Optional[str] = None,
        offload_index: Optional[int] = None,
        peft_config: Optional[Dict[str, Any]] = None,
        adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None,
        low_cpu_mem_usage: bool = False,
        adapter_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we
        invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft

        Requires peft as a backend to load the adapter weights.

        Args:
            peft_model_id (`str`, *optional*):
                The identifier of the model to look for on the Hub, or a local path to the saved adapter config file
                and adapter weights.
            adapter_name (`str`, *optional*):
                The adapter name to use. If not set, will use the default adapter.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.

                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            token (`str`, *optional*):
                Whether to use authentication token to load the remote folder. Useful to load private repositories
                that are on HuggingFace Hub. You might need to call `huggingface-cli login` and paste your tokens to
                cache it.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
                like `1`) on which the model will be allocated, the device map will map the entire model to this
                device. Passing `device_map = 0` means put the whole model on GPU 0.

                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
                GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
            offload_index (`int`, *optional*):
                `offload_index` argument to be passed to `accelerate.dispatch_model` method.
            peft_config (`Dict[str, Any]`, *optional*):
                The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts
                methods. This argument is used in case users directly pass PEFT state dicts.
            adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*):
                The state dict of the adapter to load. This argument is used in case users directly pass PEFT state
                dicts.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `False`):
                Reduce memory usage while loading the PEFT adapter. This should also speed up the loading process.
                Requires PEFT version 0.13.0 or higher.
            adapter_kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and
                `find_adapter_config_file` method.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        # peft only supports low_cpu_mem_usage starting from v0.13.0
        peft_load_kwargs = {}
        if low_cpu_mem_usage:
            min_version_lcmu = "0.13.0"
            if version.parse(importlib.metadata.version("peft")) >= version.parse(min_version_lcmu):
                peft_load_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
            else:
                raise ValueError(
                    "The version of PEFT you are using does not support `low_cpu_mem_usage` yet, "
                    f"please install PEFT >= {min_version_lcmu}."
                )

        adapter_name = adapter_name if adapter_name is not None else "default"
        if adapter_kwargs is None:
            adapter_kwargs = {}

        from peft import PeftConfig, inject_adapter_in_model, load_peft_weights
        from peft.utils import set_peft_model_state_dict

        if self._hf_peft_config_loaded and adapter_name in self.peft_config:
            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")

        # Either a Hub/local id, or an in-memory (config + state dict) pair must be supplied.
        if peft_model_id is None and (adapter_state_dict is None and peft_config is None):
            raise ValueError(
                "You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter."
            )

        # Pick the device to materialize the adapter weights on: an explicit `device` in
        # `adapter_kwargs` wins, otherwise reuse the model's device (first entry of
        # `hf_device_map` for models dispatched with accelerate).
        if "device" not in adapter_kwargs:
            device = self.device if not hasattr(self, "hf_device_map") else list(self.hf_device_map.values())[0]
        else:
            device = adapter_kwargs.pop("device")

        # To avoid PEFT errors later on with safetensors.
        if isinstance(device, torch.device):
            device = str(device)

        # We keep `revision` in the signature for backward compatibility
        if revision is not None and "revision" not in adapter_kwargs:
            adapter_kwargs["revision"] = revision
        elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]:
            logger.error(
                "You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. "
                "The one in `adapter_kwargs` will be used."
            )

        # Override token with adapter_kwargs' token
        if "token" in adapter_kwargs:
            token = adapter_kwargs.pop("token")

        if peft_config is None:
            adapter_config_file = find_adapter_config_file(
                peft_model_id,
                token=token,
                **adapter_kwargs,
            )

            if adapter_config_file is None:
                raise ValueError(
                    f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the "
                    "adapter model."
                )

            peft_config = PeftConfig.from_pretrained(
                peft_model_id,
                token=token,
                **adapter_kwargs,
            )

        # Create and add fresh new adapters into the model.
        inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs)

        if not self._hf_peft_config_loaded:
            self._hf_peft_config_loaded = True

        if peft_model_id is not None:
            adapter_state_dict = load_peft_weights(peft_model_id, token=token, device=device, **adapter_kwargs)

        # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility
        processed_adapter_state_dict = {}
        prefix = "base_model.model."
        for key, value in adapter_state_dict.items():
            if key.startswith(prefix):
                new_key = key[len(prefix) :]
            else:
                new_key = key
            processed_adapter_state_dict[new_key] = value

        # Load state dict
        incompatible_keys = set_peft_model_state_dict(
            self, processed_adapter_state_dict, adapter_name, **peft_load_kwargs
        )

        if incompatible_keys is not None:
            # check only for unexpected keys
            if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0:
                logger.warning(
                    f"Loading adapter weights from {peft_model_id} led to unexpected keys not found in the model: "
                    f" {incompatible_keys.unexpected_keys}. "
                )

        # Re-dispatch model and hooks in case the model is offloaded to CPU / Disk.
        if (
            (getattr(self, "hf_device_map", None) is not None)
            and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
            and len(self.peft_config) == 1
        ):
            self._dispatch_accelerate_model(
                device_map=device_map,
                max_memory=max_memory,
                offload_folder=offload_folder,
                offload_index=offload_index,
            )
  215. def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None:
  216. r"""
  217. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
  218. official documentation: https://huggingface.co/docs/peft
  219. Adds a fresh new adapter to the current model for training purpose. If no adapter name is passed, a default
  220. name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the
  221. default adapter name).
  222. Args:
  223. adapter_config (`~peft.PeftConfig`):
  224. The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts
  225. methods
  226. adapter_name (`str`, *optional*, defaults to `"default"`):
  227. The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
  228. """
  229. check_peft_version(min_version=MIN_PEFT_VERSION)
  230. from peft import PeftConfig, inject_adapter_in_model
  231. adapter_name = adapter_name or "default"
  232. if not self._hf_peft_config_loaded:
  233. self._hf_peft_config_loaded = True
  234. elif adapter_name in self.peft_config:
  235. raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
  236. if not isinstance(adapter_config, PeftConfig):
  237. raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.")
  238. # Retrieve the name or path of the model, one could also use self.config._name_or_path
  239. # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100
  240. adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None)
  241. inject_adapter_in_model(adapter_config, self, adapter_name)
  242. self.set_adapter(adapter_name)
  243. def set_adapter(self, adapter_name: Union[List[str], str]) -> None:
  244. """
  245. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
  246. official documentation: https://huggingface.co/docs/peft
  247. Sets a specific adapter by forcing the model to use a that adapter and disable the other adapters.
  248. Args:
  249. adapter_name (`Union[List[str], str]`):
  250. The name of the adapter to set. Can be also a list of strings to set multiple adapters.
  251. """
  252. check_peft_version(min_version=MIN_PEFT_VERSION)
  253. if not self._hf_peft_config_loaded:
  254. raise ValueError("No adapter loaded. Please load an adapter first.")
  255. elif isinstance(adapter_name, list):
  256. missing = set(adapter_name) - set(self.peft_config)
  257. if len(missing) > 0:
  258. raise ValueError(
  259. f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
  260. f" current loaded adapters are: {list(self.peft_config.keys())}"
  261. )
  262. elif adapter_name not in self.peft_config:
  263. raise ValueError(
  264. f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}"
  265. )
  266. from peft.tuners.tuners_utils import BaseTunerLayer
  267. from peft.utils import ModulesToSaveWrapper
  268. _adapters_has_been_set = False
  269. for _, module in self.named_modules():
  270. if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
  271. # For backward compatbility with previous PEFT versions
  272. if hasattr(module, "set_adapter"):
  273. module.set_adapter(adapter_name)
  274. else:
  275. module.active_adapter = adapter_name
  276. _adapters_has_been_set = True
  277. if not _adapters_has_been_set:
  278. raise ValueError(
  279. "Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters."
  280. )
  281. def disable_adapters(self) -> None:
  282. r"""
  283. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
  284. official documentation: https://huggingface.co/docs/peft
  285. Disable all adapters that are attached to the model. This leads to inferring with the base model only.
  286. """
  287. check_peft_version(min_version=MIN_PEFT_VERSION)
  288. if not self._hf_peft_config_loaded:
  289. raise ValueError("No adapter loaded. Please load an adapter first.")
  290. from peft.tuners.tuners_utils import BaseTunerLayer
  291. from peft.utils import ModulesToSaveWrapper
  292. for _, module in self.named_modules():
  293. if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
  294. # The recent version of PEFT need to call `enable_adapters` instead
  295. if hasattr(module, "enable_adapters"):
  296. module.enable_adapters(enabled=False)
  297. else:
  298. module.disable_adapters = True
  299. def enable_adapters(self) -> None:
  300. """
  301. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
  302. official documentation: https://huggingface.co/docs/peft
  303. Enable adapters that are attached to the model. The model will use `self.active_adapter()`
  304. """
  305. check_peft_version(min_version=MIN_PEFT_VERSION)
  306. if not self._hf_peft_config_loaded:
  307. raise ValueError("No adapter loaded. Please load an adapter first.")
  308. from peft.tuners.tuners_utils import BaseTunerLayer
  309. for _, module in self.named_modules():
  310. if isinstance(module, BaseTunerLayer):
  311. # The recent version of PEFT need to call `enable_adapters` instead
  312. if hasattr(module, "enable_adapters"):
  313. module.enable_adapters(enabled=True)
  314. else:
  315. module.disable_adapters = False
  316. def active_adapters(self) -> List[str]:
  317. """
  318. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
  319. official documentation: https://huggingface.co/docs/peft
  320. Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters
  321. for inference) returns the list of all active adapters so that users can deal with them accordingly.
  322. For previous PEFT versions (that does not support multi-adapter inference), `module.active_adapter` will return
  323. a single string.
  324. """
  325. check_peft_version(min_version=MIN_PEFT_VERSION)
  326. if not is_peft_available():
  327. raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")
  328. if not self._hf_peft_config_loaded:
  329. raise ValueError("No adapter loaded. Please load an adapter first.")
  330. from peft.tuners.tuners_utils import BaseTunerLayer
  331. for _, module in self.named_modules():
  332. if isinstance(module, BaseTunerLayer):
  333. active_adapters = module.active_adapter
  334. break
  335. # For previous PEFT versions
  336. if isinstance(active_adapters, str):
  337. active_adapters = [active_adapters]
  338. return active_adapters
  339. def active_adapter(self) -> str:
  340. warnings.warn(
  341. "The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning
  342. )
  343. return self.active_adapters()[0]
  344. def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict:
  345. """
  346. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
  347. official documentation: https://huggingface.co/docs/peft
  348. Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter.
  349. If no adapter_name is passed, the active adapter is used.
  350. Args:
  351. adapter_name (`str`, *optional*):
  352. The name of the adapter to get the state dict from. If no name is passed, the active adapter is used.
  353. """
  354. check_peft_version(min_version=MIN_PEFT_VERSION)
  355. if not self._hf_peft_config_loaded:
  356. raise ValueError("No adapter loaded. Please load an adapter first.")
  357. from peft import get_peft_model_state_dict
  358. if adapter_name is None:
  359. adapter_name = self.active_adapter()
  360. adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name)
  361. return adapter_state_dict
  362. def _dispatch_accelerate_model(
  363. self,
  364. device_map: str,
  365. max_memory: Optional[int] = None,
  366. offload_folder: Optional[str] = None,
  367. offload_index: Optional[int] = None,
  368. ) -> None:
  369. """
  370. Optional re-dispatch the model and attach new hooks to the model in case the model has been loaded with
  371. accelerate (i.e. with `device_map=xxx`)
  372. Args:
  373. device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
  374. A map that specifies where each submodule should go. It doesn't need to be refined to each
  375. parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
  376. same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
  377. like `1`) on which the model will be allocated, the device map will map the entire model to this
  378. device. Passing `device_map = 0` means put the whole model on GPU 0.
  379. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
  380. more information about each option see [designing a device
  381. map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
  382. max_memory (`Dict`, *optional*):
  383. A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
  384. GPU and the available CPU RAM if unset.
  385. offload_folder (`str` or `os.PathLike`, *optional*):
  386. If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
  387. offload_index (`int`, *optional*):
  388. The offload_index argument to be passed to `accelerate.dispatch_model` method.
  389. """
  390. dispatch_model_kwargs = {}
  391. # Safety checker for previous `accelerate` versions
  392. # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
  393. if "offload_index" in inspect.signature(dispatch_model).parameters:
  394. dispatch_model_kwargs["offload_index"] = offload_index
  395. no_split_module_classes = self._no_split_modules
  396. if device_map != "sequential":
  397. max_memory = get_balanced_memory(
  398. self,
  399. max_memory=max_memory,
  400. no_split_module_classes=no_split_module_classes,
  401. low_zero=(device_map == "balanced_low_0"),
  402. )
  403. if isinstance(device_map, str):
  404. device_map = infer_auto_device_map(
  405. self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
  406. )
  407. dispatch_model(
  408. self,
  409. device_map=device_map,
  410. offload_dir=offload_folder,
  411. **dispatch_model_kwargs,
  412. )