quantizer_torchao.py

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import TYPE_CHECKING, Any, Dict, List, Union

from packaging import version

from .base import HfQuantizer
from .quantizers_utils import get_module_from_name
from ..utils import is_torch_available, is_torchao_available, logging


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

if is_torch_available():
    import torch

if is_torchao_available():
    from torchao.quantization import quantize_


logger = logging.get_logger(__name__)


# Finds the parent of a node module named "name"
def find_parent(model, name):
    module_tree = name.split(".")[:-1]
    parent = model
    for m in module_tree:
        parent = parent._modules[m]
    return parent
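
# Minimal sketch (illustrative only, not part of the module logic) of what find_parent returns:
#
#   import torch
#   blocks = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 4))
#   model = torch.nn.Sequential(blocks)
#   find_parent(model, "0.1") is blocks  # True: the module that directly owns submodule "1"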

class TorchAoHfQuantizer(HfQuantizer):
    """
    Quantizer for torchao: https://github.com/pytorch/ao/
    """

    requires_parameters_quantization = True
    requires_calibration = False
    required_packages = ["torchao"]

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)

    def validate_environment(self, *args, **kwargs):
        if not is_torchao_available():
            raise ImportError("Loading a torchao quantized model requires the torchao library (`pip install torchao`)")

        self.offload = False

        device_map = kwargs.get("device_map", None)
        if isinstance(device_map, dict):
            if "cpu" in device_map.values() or "disk" in device_map.values():
                if self.pre_quantized:
                    raise ValueError(
                        "You are attempting to perform cpu/disk offload with a pre-quantized torchao model. "
                        "This is not supported yet. Please remove the CPU or disk device from the device_map."
                    )
                else:
                    self.offload = True
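
    # Illustrative only (hypothetical module names): a device_map such as
    # {"model.embed_tokens": 0, "model.layers": "cpu"} sets self.offload = True when the model
    # is quantized on the fly, while the same map with a pre-quantized checkpoint raises the
    # ValueError above.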

    def update_torch_dtype(self, torch_dtype):
        if self.quantization_config.quant_type == "int4_weight_only":
            if torch_dtype is not None and torch_dtype != torch.bfloat16:
                logger.warning_once(
                    f"Setting torch_dtype to {torch_dtype} for int4_weight_only quantization, but only bfloat16 is supported right now. Please set the torch_dtype to bfloat16."
                )
            if torch_dtype is None:
                logger.warning_once(
                    "Setting torch_dtype to torch.bfloat16 for int4_weight_only quantization since only bfloat16 is supported right now. Please set torch_dtype=torch.bfloat16 to remove this warning."
                )
                torch_dtype = torch.bfloat16
        return torch_dtype

    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
        if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"):
            from accelerate.utils import CustomDtype

            map_to_target_dtype = {
                "int4_weight_only": CustomDtype.INT4,
                "int8_weight_only": torch.int8,
                "int8_dynamic_activation_int8_weight": torch.int8,
            }
            return map_to_target_dtype[self.quantization_config.quant_type]
        else:
            raise ValueError(
                "You are using `device_map='auto'` on a torchao quantized model. To automatically compute"
                " the appropriate device map, you should upgrade your `accelerate` library with "
                "`pip install --upgrade accelerate`"
            )
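
    # Note (assumed accelerate behavior): CustomDtype.INT4 lets accelerate budget roughly half a
    # byte per weight element when computing the automatic device map, rather than the
    # full-precision tensor size.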

    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        # Reserve extra space for the quantization parameters (e.g. scales). Tested with int4 weight-only and group size = 128.
        max_memory = {key: val * 0.9 for key, val in max_memory.items()}
        return max_memory

    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
        from ..integrations import get_keys_to_not_convert

        self.modules_to_not_convert = get_keys_to_not_convert(model)
        if self.quantization_config.modules_to_not_convert is not None:
            self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
        return

    def check_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: Dict[str, Any],
        **kwargs,
    ) -> bool:
        param_device = kwargs.pop("param_device", None)
        # check if the param_name is not in self.modules_to_not_convert
        if any((key + "." in param_name) or (key == param_name) for key in self.modules_to_not_convert):
            return False
        elif param_device == "cpu" and self.offload:
            # We don't quantize weights that we offload
            return False
        else:
            # we only quantize the weight of nn.Linear
            module, tensor_name = get_module_from_name(model, param_name)
            return isinstance(module, torch.nn.Linear) and (tensor_name == "weight")
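
    # Illustrative only (hypothetical parameter names): with modules_to_not_convert=["lm_head"],
    # "model.layers.0.self_attn.q_proj.weight" is quantized (an nn.Linear weight), while
    # "lm_head.weight" is skipped by the first check and "model.norm.weight" by the nn.Linear check.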

    def create_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        target_device: "torch.device",
        state_dict: Dict[str, Any],
        unexpected_keys: List[str],
    ):
        """
        Each nn.Linear layer that needs to be quantized is processed here.
        First, we set the value of the weight tensor, then we move it to the target device. Finally, we quantize the module.
        """
        module, tensor_name = get_module_from_name(model, param_name)
        module._parameters[tensor_name] = torch.nn.Parameter(param_value).to(device=target_device)
        quantize_(module, self.quantization_config.get_apply_tensor_subclass())
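
    # Note (assumed torchao behavior): quantize_ swaps module.weight in place for a torchao
    # tensor subclass (e.g. an affine-quantized tensor for the *_weight_only quant types), so no
    # dedicated quantized-linear module class is needed here.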

    def _process_model_after_weight_loading(self, model):
        """No further processing is required for a torchao quantized model."""
        return

    def is_serializable(self, safe_serialization=None):
        if safe_serialization:
            logger.warning(
                "torchao quantized models do not support safe serialization; "
                "please set `safe_serialization` to False"
            )
            return False
        _is_torchao_serializable = version.parse(importlib.metadata.version("huggingface_hub")) >= version.parse(
            "0.25.0"
        )
        if not _is_torchao_serializable:
            logger.warning("torchao quantized models are only serializable with huggingface_hub >= 0.25.0")
        return _is_torchao_serializable

    @property
    def is_trainable(self):
        supported_quant_types_for_training = [
            "int8_weight_only",
            "int8_dynamic_activation_int8_weight",
        ]
        return self.quantization_config.quant_type in supported_quant_types_for_training
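

# Minimal usage sketch (illustrative, not part of this module), assuming a hypothetical
# checkpoint id and the `TorchAoConfig` quantization config exposed by transformers:
#
#   import torch
#   from transformers import AutoModelForCausalLM, TorchAoConfig
#
#   quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
#   model = AutoModelForCausalLM.from_pretrained(
#       "some-org/some-model",       # hypothetical checkpoint
#       torch_dtype=torch.bfloat16,  # int4_weight_only currently expects bfloat16
#       device_map="auto",
#       quantization_config=quantization_config,
#   )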