# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
from typing import TYPE_CHECKING, Optional

from packaging import version

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import is_auto_gptq_available, is_optimum_available, is_torch_available, logging
from ..utils.quantization_config import GPTQConfig, QuantizationConfigMixin


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


class GptqHfQuantizer(HfQuantizer):
    """
    Quantizer of the GPTQ method. For GPTQ, the quantizer supports calibration of the model through the
    `auto_gptq` package. Quantization is done under the hood for users if they load a non-prequantized model.
    """

    requires_calibration = False
    required_packages = ["optimum", "auto_gptq"]
    optimum_quantizer = None

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        super().__init__(quantization_config, **kwargs)
        from optimum.gptq import GPTQQuantizer

        self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())
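
    # A minimal sketch of the config hand-off above, assuming a 4-bit setup
    # (the values are illustrative; `GPTQConfig` and `to_dict_optimum()` are the
    # transformers APIs used in `__init__`):
    #
    #     config = GPTQConfig(bits=4, group_size=128, dataset="c4")
    #     quantizer = GPTQQuantizer.from_dict(config.to_dict_optimum())
    #
    # i.e. the transformers-side `GPTQConfig` is serialized into the keyword arguments
    # that `optimum.gptq.GPTQQuantizer` expects, keeping the two configs in sync.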

    def validate_environment(self, *args, **kwargs):
        # Check that the packages are installed before querying their versions,
        # otherwise `importlib.metadata.version` raises `PackageNotFoundError`.
        if not (is_optimum_available() and is_auto_gptq_available()):
            raise ImportError(
                "Loading a GPTQ quantized model requires optimum (`pip install optimum`) and the auto-gptq library (`pip install auto-gptq`)"
            )
        if version.parse(importlib.metadata.version("auto_gptq")) < version.parse("0.4.2"):
            raise ImportError(
                "You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq`"
            )
        # CPU quantization/inference is only supported by auto-gptq > 0.4.2.
        gptq_supports_cpu = version.parse(importlib.metadata.version("auto_gptq")) > version.parse("0.4.2")
        if not gptq_supports_cpu and not torch.cuda.is_available():
            raise RuntimeError("GPU is required to quantize or run a quantized model.")

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        if torch_dtype is None:
            torch_dtype = torch.float16
        elif torch_dtype != torch.float16:
            logger.info("We suggest you set `torch_dtype=torch.float16` for better efficiency with GPTQ.")
        return torch_dtype

    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
        if model.__class__.main_input_name != "input_ids":
            raise RuntimeError("We can only quantize pure text models.")

        if self.pre_quantized:
            # Swap the model's linear layers for their quantized counterparts so the
            # pre-quantized checkpoint's weights can be loaded into them.
            model = self.optimum_quantizer.convert_model(model)

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        if self.pre_quantized:
            # Weights are already quantized; finalize buffers/kernels for inference.
            model = self.optimum_quantizer.post_init_model(model)
        else:
            # Quantize on the fly, defaulting to the model's own tokenizer for calibration.
            if self.quantization_config.tokenizer is None:
                self.quantization_config.tokenizer = model.name_or_path

            self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer)
            model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict())

    @property
    def is_trainable(self, model: Optional["PreTrainedModel"] = None):
        return True

    def is_serializable(self, safe_serialization=None):
        return True
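

# A minimal usage sketch of the two paths this quantizer handles (the model id and
# dataset name below are illustrative assumptions, not part of this module):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
#
#     # Path 1: quantize a full-precision model on the fly (calibration via auto-gptq).
#     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
#     gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
#     model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=gptq_config)
#
#     # Path 2: load an already-quantized checkpoint (uses convert_model/post_init_model above).
#     model = AutoModelForCausalLM.from_pretrained("<gptq-quantized-checkpoint>", device_map="auto")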