
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

from packaging import version

from .base import HfQuantizer
from .quantizers_utils import get_module_from_name


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import (
    is_accelerate_available,
    is_optimum_quanto_available,
    is_quanto_available,
    is_torch_available,
    logging,
)
from ..utils.quantization_config import QuantoConfig


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


class QuantoHfQuantizer(HfQuantizer):
    """
    Quantizer for the quanto library
    """

    required_packages = ["quanto", "accelerate"]
    requires_parameters_quantization = True
    requires_calibration = False

    def __init__(self, quantization_config: QuantoConfig, **kwargs):
        super().__init__(quantization_config, **kwargs)
        self.post_init()

    def post_init(self):
        r"""
        Safety checker
        """
        if self.quantization_config.activations is not None and not self.pre_quantized:
            raise ValueError(
                "We don't support quantizing the activations with the transformers library. "
                "Use the quanto library for more complex use cases such as activation quantization, "
                "calibration and quantization-aware training."
            )

    def validate_environment(self, *args, **kwargs):
        if not (is_optimum_quanto_available() or is_quanto_available()):
            raise ImportError(
                "Loading an optimum-quanto quantized model requires the optimum-quanto library (`pip install optimum-quanto`)"
            )
        if not is_accelerate_available():
            raise ImportError(
                "Loading an optimum-quanto quantized model requires the accelerate library (`pip install accelerate`)"
            )

    def update_device_map(self, device_map):
        if device_map is None:
            device_map = {"": "cpu"}
            logger.info(
                "The device_map was not initialized. "
                "Setting device_map to {'':'cpu'}. "
                "If you want to use the model for inference, please set device_map='auto'."
            )
        return device_map

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        if torch_dtype is None:
            logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.")
            torch_dtype = torch.float32
        return torch_dtype

    def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
        if is_optimum_quanto_available():
            from optimum.quanto import QModuleMixin
        elif is_quanto_available():
            logger.warning_once(
                "Importing from quanto will be deprecated in v4.47. Please install optimum-quanto instead: `pip install optimum-quanto`"
            )
            from quanto import QModuleMixin

        # Keys that belong to quantized modules but are neither weights nor biases are created
        # at quantization time, so they should not be reported as missing.
        not_missing_keys = []
        for name, module in model.named_modules():
            if isinstance(module, QModuleMixin):
                for missing in missing_keys:
                    if (
                        (name in missing or name in f"{prefix}.{missing}")
                        and not missing.endswith(".weight")
                        and not missing.endswith(".bias")
                    ):
                        not_missing_keys.append(missing)
        return [k for k in missing_keys if k not in not_missing_keys]

    def check_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: Dict[str, Any],
        **kwargs,
    ) -> bool:
        """
        Check if a parameter needs to be quantized.
        """
        if is_optimum_quanto_available():
            from optimum.quanto import QModuleMixin
        elif is_quanto_available():
            logger.warning_once(
                "Importing from quanto will be deprecated in v4.47. Please install optimum-quanto instead: `pip install optimum-quanto`"
            )
            from quanto import QModuleMixin

        device_map = kwargs.get("device_map", None)
        param_device = kwargs.get("param_device", None)
        # we don't quantize the parameter if its module is going to be offloaded to the cpu
        if device_map is not None and param_device is not None:
            device_map_values = set(device_map.values())
            if param_device == "cpu" and len(device_map_values) > 1:
                if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}):
                    return False

        module, tensor_name = get_module_from_name(model, param_name)
        # We only quantize the weights; the bias is not quantized.
        if isinstance(module, QModuleMixin) and "weight" in tensor_name:
            # if the weights are already quantized, there is no need to recreate them with `create_quantized_param`
            return not module.frozen
        else:
            return False

    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        # Use only 90% of the available memory per device to leave some headroom when computing the device map.
        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
        return max_memory

    def create_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        target_device: "torch.device",
        *args,
        **kwargs,
    ):
        """
        Create the quantized parameter by calling .freeze() after setting it to the module.
        """
        from accelerate.utils import set_module_tensor_to_device

        set_module_tensor_to_device(model, param_name, target_device, param_value)
        module, _ = get_module_from_name(model, param_name)
        module.freeze()
        module.weight.requires_grad = False

    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
        if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.27.0"):
            from accelerate.utils import CustomDtype

            mapping = {
                "int8": torch.int8,
                "float8": CustomDtype.FP8,
                "int4": CustomDtype.INT4,
                "int2": CustomDtype.INT2,
            }
            target_dtype = mapping[self.quantization_config.weights]
            return target_dtype
        else:
            raise ValueError(
                "You are using `device_map='auto'` on an optimum-quanto quantized model. To automatically compute"
                " the appropriate device map, you should upgrade your `accelerate` library with"
                " `pip install --upgrade accelerate` or install it from source."
            )

    def _process_model_before_weight_loading(
        self, model: "PreTrainedModel", keep_in_fp32_modules: List[str] = [], **kwargs
    ):
        from ..integrations import get_keys_to_not_convert, replace_with_quanto_layers

        # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
        if self.quantization_config.modules_to_not_convert is None:
            self.modules_to_not_convert = get_keys_to_not_convert(model)
        else:
            self.modules_to_not_convert = self.quantization_config.modules_to_not_convert

        if not isinstance(self.modules_to_not_convert, list):
            self.modules_to_not_convert = [self.modules_to_not_convert]

        self.modules_to_not_convert.extend(keep_in_fp32_modules)

        model, _ = replace_with_quanto_layers(
            model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
        )
        model.config.quantization_config = self.quantization_config

    def _process_model_after_weight_loading(self, model):
        return model

    @property
    def is_trainable(self, model: Optional["PreTrainedModel"] = None):
        return True

    def is_serializable(self, safe_serialization=None):
        return False