quantizer_bitnet.py

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Union

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import is_accelerate_available, is_torch_available, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class BitNetHfQuantizer(HfQuantizer):
    """
    1.58-bit quantization from the BitNet quantization method: each weight is
    ternary, taking one of the values {-1, 0, 1}, i.e. log2(3) ≈ 1.58 bits of
    information per weight. Before weight loading, the model's linear layers
    are converted into BitLinear layers.

    Check out the paper introducing this method: https://arxiv.org/pdf/2402.17764
    """
    requires_parameters_quantization = False
    requires_calibration = True

    required_packages = ["accelerate"]
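
    # Note: `requires_calibration = True` means only checkpoints that were
    # quantized ahead of time can be loaded; the base `HfQuantizer` refuses
    # on-the-fly quantization of a full-precision model when this flag is set.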

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)
        self.quantization_config = quantization_config

    def validate_environment(self, *args, **kwargs):
        if not is_accelerate_available():
            raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")

        if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
            raise ValueError(
                "Loading ternary weights from tf/flax is currently not supported; please make"
                " sure the weights are in PyTorch format."
            )

        if not torch.cuda.is_available():
            logger.warning_once(
                "You don't have a GPU available to load the model; inference will be slow because of weight unpacking"
            )
            return

        device_map = kwargs.get("device_map", None)
        if device_map is None:
            logger.warning_once(
                "You are loading a BitNet model on CPU while a CUDA device is available; make sure to move "
                "the model to a GPU device in order to run it."
            )
        elif isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
            raise ValueError(
                "You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
                " This is not supported. Please remove the CPU or disk device from the device_map."
            )

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        return model

    def _process_model_before_weight_loading(
        self,
        model: "PreTrainedModel",
        device_map,
        keep_in_fp32_modules: List[str] = [],
        **kwargs,
    ):
        from ..integrations import get_keys_to_not_convert, replace_with_bitnet_linear

        # Keep modules such as the output head in full precision, plus any
        # modules the user explicitly excluded in the quantization config.
        self.modules_to_not_convert = get_keys_to_not_convert(model)

        if self.quantization_config.modules_to_not_convert is not None:
            self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
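
        # Conceptually, `replace_with_bitnet_linear` walks the module tree and
        # swaps each remaining `nn.Linear` for a `BitLinear` layer that stores
        # packed ternary weights; `pre_quantized=True` signals that the packed
        # weights will be filled in directly from the checkpoint.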
        model = replace_with_bitnet_linear(
            model,
            modules_to_not_convert=self.modules_to_not_convert,
            quantization_config=self.quantization_config,
            pre_quantized=self.pre_quantized,
        )

    def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
        # Reserve ~10% of each device's memory as headroom (e.g. for buffers
        # created when packed weights are unpacked at inference time).
        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
        return max_memory

    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
        # For device-map memory estimation, count one byte per weight: the
        # ternary weights are stored packed in 8-bit tensors.
        target_dtype = torch.int8
        return target_dtype

    def is_serializable(self, safe_serialization=None):
        return True

    @property
    def is_trainable(self) -> bool:
        return False
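
# Usage sketch (illustrative, not part of this module): the quantizer is picked
# up automatically when a checkpoint's config carries a BitNet
# `quantization_config`; the repo id below is just one example of a
# pre-quantized BitNet checkpoint.
#
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "HF1BitLLM/Llama3-8B-1.58-100B-tokens",
#       device_map="cuda",  # loading on CPU works, but inference is slow due to weight unpacking
#   )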