quantizer_awq.py

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
from typing import TYPE_CHECKING

from packaging import version

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import is_accelerate_available, is_auto_awq_available, is_torch_available, logging
from ..utils.quantization_config import AWQLinearVersion


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)

class AwqQuantizer(HfQuantizer):
    """
    4-bit quantization for Activation-aware Weight Quantization (AWQ): https://arxiv.org/abs/2306.00978
    """

    # AWQ requires data calibration - we support only inference
    requires_calibration = True

    required_packages = ["awq", "accelerate"]

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)
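
    # Environment validation: auto-awq and accelerate must be installed. The IPEX backend
    # (AWQLinearVersion.IPEX) additionally requires autoawq >= 0.2.6 and a CPU-only device_map;
    # all other backends require a CUDA device and a device_map without "cpu" or "disk" entries.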
    def validate_environment(self, device_map, **kwargs):
        if not is_auto_awq_available():
            raise ImportError("Loading an AWQ quantized model requires the auto-awq library (`pip install autoawq`)")

        if not is_accelerate_available():
            raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)")

        if self.quantization_config.version == AWQLinearVersion.IPEX:
            if version.parse(importlib.metadata.version("autoawq")) < version.parse("0.2.6"):
                raise RuntimeError(
                    "To use the IPEX backend, you need autoawq>=0.2.6. Please install the latest version or from source."
                )
            if (
                device_map is not None
                and isinstance(device_map, dict)
                and (torch.device("cpu") not in device_map.values() or len(device_map.values()) > 1)
            ):
                raise ValueError(
                    "You are attempting to load an IPEX version AWQ model with a device_map that contains devices other than CPU."
                    " This is not supported. Please make sure only CPU devices are in the device_map."
                )
        else:
            if not torch.cuda.is_available():
                raise RuntimeError(
                    "GPU is required to run an AWQ quantized model. You can use the IPEX version of AWQ if you have an Intel CPU."
                )

            if device_map is None:
                logger.warning_once(
                    "You have loaded an AWQ model on CPU and have a CUDA device available, make sure to set "
                    "your model on a GPU device in order to run your model."
                )
            elif device_map is not None:
                if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
                    raise ValueError(
                        "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device."
                        " This is not supported. Please remove the CPU or disk device from the device_map."
                    )
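
    # AWQ kernels run in float16: default `torch_dtype` to float16 and warn if the caller asks
    # for anything else.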
    def update_torch_dtype(self, torch_dtype):
        if torch_dtype is None:
            torch_dtype = torch.float16
        elif torch_dtype != torch.float16:
            logger.warning("We suggest setting `torch_dtype=torch.float16` for better efficiency with AWQ.")
        return torch_dtype
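
    # Before the checkpoint weights are loaded: swap every eligible nn.Linear for its AWQ
    # quantized counterpart (skipping `modules_to_not_convert`, typically the output head) and
    # patch model-specific quantization scales.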
    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
        from ..integrations import get_keys_to_not_convert, replace_quantization_scales, replace_with_awq_linear

        self.modules_to_not_convert = get_keys_to_not_convert(model)

        if self.quantization_config.modules_to_not_convert is not None:
            self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)

        model, has_been_replaced = replace_with_awq_linear(
            model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert
        )

        model = replace_quantization_scales(model, model.config.model_type)

        if not has_been_replaced:
            logger.warning(
                "You are loading an AWQ model but no linear modules were found in your model."
                " Please double check your model architecture, or submit an issue on github if you think this is a bug."
            )
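
    # After the weights are loaded: optionally fuse supported modules for faster inference, then
    # run the backend-specific post-initialization for the Exllama or IPEX kernels.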
    def _process_model_after_weight_loading(self, model):
        if self.quantization_config.do_fuse:
            from ..integrations import fuse_awq_modules

            model = fuse_awq_modules(model, self.quantization_config)
            model._awq_is_fused = True  # TODO: consider storing this flag in model.config instead

        if self.quantization_config.version == AWQLinearVersion.EXLLAMA:
            from ..integrations import post_init_awq_exllama_modules

            model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config)

        if self.quantization_config.version == AWQLinearVersion.IPEX:
            from ..integrations import post_init_awq_ipex_modules

            model = post_init_awq_ipex_modules(model)
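
    # Fused and Exllama-backed models cannot be written back to disk; every other AWQ model can.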
    def is_serializable(self, safe_serialization=None):
        # AWQ through auto-awq has always been serializable, except if the model is fused or uses the Exllama backend.
        if self.quantization_config.do_fuse:
            logger.warning("You cannot save an AWQ model that uses fused modules!")
            return False

        if self.quantization_config.version == AWQLinearVersion.EXLLAMA:
            logger.warning("You cannot save an AWQ model that uses Exllama backend!")
            return False

        return True
    @property
    def is_trainable(self):
        # AWQ supports PEFT fine-tuning from version 0.2.0
        MIN_AWQ_VERSION_FOR_PEFT = "0.2.0"
        return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
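

# Minimal usage sketch (illustration only, not part of this module): the quantizer above is
# dispatched automatically by `from_pretrained` when a checkpoint's config carries AWQ
# quantization metadata. The checkpoint name below is a hypothetical placeholder.
#
#     import torch
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "some-org/some-model-AWQ",   # hypothetical AWQ-quantized checkpoint on the Hub
#         torch_dtype=torch.float16,   # matches the dtype this quantizer defaults to
#         device_map="cuda:0",         # a CUDA device is required for the non-IPEX backends
#     )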