def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
param_device = kwargs.pop("param_device", None)
# check if the param_name is not in self.modules_to_not_convert
if any((key + "." in param_name) or (key == param_name) for key in self.modules_to_not_convert):
return False
elif param_device == "cpu" and self.offload:
# We don't quantize weights that we offload
return False
else:
# we only quantize the weight of nn.Linear
module, tensor_name = get_module_from_name(model, param_name)
return isinstance(module, torch.nn.Linear) and (tensor_name == "weight")
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: List[str],
):
"""
Each nn.Linear layer that needs to be quantized is processed here.
First, we set the value of the weight tensor, then we move it to the target device. Finally, we quantize the module.
"""
from torchao.quantization import quantize_
module, tensor_name = get_module_from_name(model, param_name)
if self.pre_quantized:
module._parameters[tensor_name] = torch.nn.Parameter(param_value.to(device=target_device))
if isinstance(module, nn.Linear):
module.extra_repr = types.MethodType(_linear_extra_repr, module)
else:
module._parameters[tensor_name] = torch.nn.Parameter(param_value).to(device=target_device)
quantize_(module, self.quantization_config.get_apply_tensor_subclass())
def _process_model_after_weight_loading(self, model, **kwargs):
"""No process required for torchao quantized model"""
return
def is_serializable(self, safe_serialization=None):
if safe_serialization:
logger.warning(
"torchao quantized model does not support safe serialization, "
"please set `safe_serialization` to False"
)
return False
_is_torchao_serializable = version.parse(importlib.metadata.version("huggingface_hub")) >= version.parse(
"0.25.0"
)
if not _is_torchao_serializable:
logger.warning("torchao quantized model is only serializable after huggingface_hub >= 0.25.0 ")
if self.offload and self.quantization_config.modules_to_not_convert is None:
logger.warning(
"The model contains offloaded modules and these modules are not quantized. We don't recommend saving the model as we won't be able to reload them."
"If you want to specify modules to not quantize, please specify modules_to_not_convert in the quantization_config."
)
return False
return _is_torchao_serializable
@property
def is_trainable(self):
supported_quant_types_for_training = [
"int8_weight_only",
"int8_dynamic_activation_int8_weight",
]
return self.quantization_config.quant_type in supported_quant_types_for_training
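# Hedged usage sketch (not part of quantizer_torchao.py): how this quantizer is typically reached
# through `from_pretrained`, assuming `TorchAoConfig` is exported by the installed transformers
# version and `torchao` is installed. The checkpoint id is illustrative; "int8_weight_only" is one
# of the quant types listed above as trainable.
from transformers import AutoModelForCausalLM, TorchAoConfig
torchao_config = TorchAoConfig("int8_weight_only")
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # illustrative checkpoint
    device_map="auto",
    quantization_config=torchao_config,
)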
class GptqHfQuantizer(HfQuantizer):
"""
Quantizer of the GPTQ method - the quantizer supports calibration of the model through the
`auto_gptq` or `gptqmodel` package. Quantization is done under the hood for users if they load a non-prequantized model.
"""
requires_calibration = False
required_packages = ["optimum", "auto_gptq", "gptqmodel"]
optimum_quantizer = None
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
if not is_optimum_available():
raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
from optimum.gptq import GPTQQuantizer
self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())
def validate_environment(self, *args, **kwargs):
if not is_optimum_available():
raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
if is_auto_gptq_available() and is_gptqmodel_available():
logger.warning("Detected gptqmodel and auto-gptq, will use gptqmodel")
gptq_supports_cpu = (
is_auto_gptq_available()
and version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
) or is_gptqmodel_available()
if not gptq_supports_cpu and not torch.cuda.is_available():
raise RuntimeError("GPU is required to quantize or run quantize model.")
elif not (is_auto_gptq_available() or is_gptqmodel_available()):
raise ImportError(
"Loading a GPTQ quantized model requires gptqmodel (`pip install gptqmodel`) or auto-gptq (`pip install auto-gptq`) library. "
)
elif is_auto_gptq_available() and version.parse(importlib.metadata.version("auto_gptq")) < version.parse(
"0.4.2"
):
raise ImportError(
"You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq` or use gptqmodel by `pip install gptqmodel>=1.4.3`."
)
elif is_gptqmodel_available() and (
version.parse(importlib.metadata.version("gptqmodel")) < version.parse("1.4.3")
or version.parse(importlib.metadata.version("optimum")) < version.parse("1.23.99")
):
raise ImportError("The gptqmodel version should be >= 1.4.3, optimum version should >= 1.24.0")
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
torch_dtype = torch.float16
logger.info("Loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually.")
elif torch_dtype != torch.float16:
logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with GPTQ.")
return torch_dtype
def update_device_map(self, device_map):
if device_map is None:
device_map = {"": torch.device("cpu")}
# Only auto-gptq does not support CPU, so move the model to CUDA if available.
if not is_gptqmodel_available() and device_map in ("cpu", {"": torch.device("cpu")}):
device_map = {"": 0}
return device_map
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
if model.__class__.main_input_name != "input_ids":
raise RuntimeError("We can only quantize pure text model.")
if self.pre_quantized:
model = self.optimum_quantizer.convert_model(model, **kwargs)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
if self.pre_quantized:
model = self.optimum_quantizer.post_init_model(model)
else:
if self.quantization_config.tokenizer is None:
self.quantization_config.tokenizer = model.name_or_path
self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer)
model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict())
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return True
def is_serializable(self, safe_serialization=None):
return True
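# Hedged usage sketch (not part of quantizer_gptq.py): in-flight GPTQ calibration through
# `from_pretrained`, assuming `optimum` plus `gptqmodel` or `auto-gptq` are installed. The
# checkpoint id and the "c4" calibration dataset are illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", device_map="auto", quantization_config=gptq_config
)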
class QuantoHfQuantizer(HfQuantizer):
"""
Quantizer for the quanto library
"""
required_packages = ["quanto", "accelerate"]
requires_parameters_quantization = True
requires_calibration = False
def __init__(self, quantization_config: QuantoConfig, **kwargs):
super().__init__(quantization_config, **kwargs)
self.post_init()
def post_init(self):
r"""
Safety checker
"""
if self.quantization_config.activations is not None and not self.pre_quantized:
raise ValueError(
"We don't support quantizing the activations with transformers library."
"Use quanto library for more complex use cases such as activations quantization, calibration and quantization aware training."
)
def validate_environment(self, *args, **kwargs):
if not is_optimum_quanto_available():
raise ImportError(
"Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)"
)
if not is_accelerate_available():
raise ImportError(
"Loading an optimum-quanto quantized model requires accelerate library (`pip install accelerate`)"
)
def update_device_map(self, device_map):
if device_map is None:
device_map = {"": "cpu"}
logger.info(
"The device_map was not initialized. "
"Setting device_map to {'':'cpu'}. "
"If you want to use the model for inference, please set device_map ='auto'"
)
return device_map
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.")
torch_dtype = torch.float32
return torch_dtype
def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
if is_optimum_quanto_available():
from optimum.quanto import QModuleMixin
not_missing_keys = []
for name, module in model.named_modules():
if isinstance(module, QModuleMixin):
for missing in missing_keys:
if (
(name in missing or name in f"{prefix}.{missing}")
and not missing.endswith(".weight")
and not missing.endswith(".bias")
):
not_missing_keys.append(missing)
return [k for k in missing_keys if k not in not_missing_keys]
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
"""
Check if a parameter needs to be quantized.
"""
if is_optimum_quanto_available():
from optimum.quanto import QModuleMixin
device_map = kwargs.get("device_map", None)
param_device = kwargs.get("param_device", None)
# we don't quantize the model if the module is going to be offloaded to the cpu
if device_map is not None and param_device is not None:
device_map_values = set(device_map.values())
if param_device == "cpu" and len(device_map_values) > 1:
if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}):
return False
module, tensor_name = get_module_from_name(model, param_name)
# We only quantize the weights; the bias is not quantized.
if isinstance(module, QModuleMixin) and "weight" in tensor_name:
# if the weights are already quantized, there is no need to recreate them with `create_quantized_param`
return not module.frozen
else:
return False
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
*args,
**kwargs,
):
"""
Create the quantized parameter by calling .freeze() after setting it to the module.
"""
from accelerate.utils import set_module_tensor_to_device
set_module_tensor_to_device(model, param_name, target_device, param_value)
module, _ = get_module_from_name(model, param_name)
module.freeze()
module.weight.requires_grad = False
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.27.0"):
from accelerate.utils import CustomDtype
mapping = {
"int8": torch.int8,
"float8": CustomDtype.FP8,
"int4": CustomDtype.INT4,
"int2": CustomDtype.INT2,
}
target_dtype = mapping[self.quantization_config.weights]
return target_dtype
else:
raise ValueError(
"You are using `device_map='auto'` on an optimum-quanto quantized model. To automatically compute"
" the appropriate device map, you should upgrade your `accelerate` library,"
"`pip install --upgrade accelerate` or install it from source."
)
def _process_model_before_weight_loading(
self, model: "PreTrainedModel", keep_in_fp32_modules: List[str] = [], **kwargs
):
from ..integrations import get_keys_to_not_convert, replace_with_quanto_layers
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if self.quantization_config.modules_to_not_convert is None:
self.modules_to_not_convert = get_keys_to_not_convert(model)
else:
self.modules_to_not_convert = self.quantization_config.modules_to_not_convert
if not isinstance(self.modules_to_not_convert, list):
self.modules_to_not_convert = [self.modules_to_not_convert]
self.modules_to_not_convert.extend(keep_in_fp32_modules)
model, _ = replace_with_quanto_layers(
model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model, **kwargs):
return model
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return True
def is_serializable(self, safe_serialization=None):
return False
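# Hedged usage sketch (not part of quantizer_quanto.py): weight-only quantization with quanto,
# assuming `QuantoConfig`, `optimum-quanto` and `accelerate` are available. As enforced in
# `post_init()` above, `activations` must be left unset for in-flight quantization. The
# checkpoint id is illustrative.
from transformers import AutoModelForCausalLM, QuantoConfig
quanto_config = QuantoConfig(weights="int8")
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", device_map="auto", quantization_config=quanto_config
)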
class EetqHfQuantizer(HfQuantizer):
"""
8-bit quantization from the EETQ quantization method:
before loading: converts transformer layers into W8A16Linear
during loading: loads the 16-bit weights and passes them to the layer object
after loading: quantizes individual weights into 8-bit at the first .cuda() call
"""
requires_parameters_quantization = True
requires_calibration = False
required_packages = ["eetq", "accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_eetq_available():
raise ImportError(
"Using `eetq` 8-bit quantization requires eetq."
"Please install the latest version of eetq from : https://github.com/NetEase-FuXi/EETQ"
)
try:
import eetq # noqa: F401
except ImportError as exc:
if "shard_checkpoint" in str(exc):
# EETQ 1.0.0 is currently broken with the latest transformers because it tries to import the removed
# shard_checkpoint function, see https://github.com/NetEase-FuXi/EETQ/issues/34.
# TODO: Update message once eetq releases a fix
raise ImportError(
"You are using a version of EETQ that is incompatible with the current transformers version. "
"Either downgrade transformers to <= v4.46.3 or, if available, upgrade EETQ to > v1.0.0."
) from exc
else:
raise
if not is_accelerate_available():
raise ImportError("Loading an EETQ quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Converting into 8-bit weights from tf/flax weights is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded an EETQ model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif device_map is not None:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load an EETQ model with a device_map that contains a CPU or disk device."
" This is not supported. Please remove the CPU or disk device from the device_map."
)
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
torch_dtype = torch.float16
logger.info(
"Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
"requirements of `eetq` to enable model loading in 8-bit. "
"Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
" torch_dtype=torch.float16 to remove this warning.",
torch_dtype,
)
elif torch_dtype != torch.float16:
logger.info("We suggest you to set `torch_dtype=torch.float16` for better efficiency with EETQ.")
return torch_dtype
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
):
from eetq import EetqLinear
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module, EetqLinear):
if self.pre_quantized or tensor_name == "bias":
if tensor_name == "weight" and param_value.dtype != torch.int8:
raise ValueError("Expect quantized weights but got an unquantized weight")
return False
else:
if tensor_name == "weight_scale":
raise ValueError("Expect unquantized weights but got a quantized weight_scale")
return True
return False
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
"""
quantizes weights into qweight and weight_scales
"""
from eetq import quantize_and_preprocess_weights
module, tensor_name = get_module_from_name(model, param_name)
new_value, weight_scale = quantize_and_preprocess_weights(param_value)
module._buffers[tensor_name] = new_value.to(target_device)
module.register("weight_scales", weight_scale.to(target_device))
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
device_map,
keep_in_fp32_modules: List[str] = [],
**kwargs,
):
from ..integrations import get_keys_to_not_convert, replace_with_eetq_linear
self.modules_to_not_convert = get_keys_to_not_convert(model)
if self.quantization_config.modules_to_not_convert is not None:
self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
model = replace_with_eetq_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
model.config.quantization_config = self.quantization_config
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return True
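# Hedged usage sketch (not part of quantizer_eetq.py): 8-bit EETQ quantization, assuming `EetqConfig`
# is exported by the installed transformers version, `eetq` is installed, and a single CUDA device is
# available (the checks above reject CPU/disk placements). The checkpoint id is illustrative.
from transformers import AutoModelForCausalLM, EetqConfig
eetq_config = EetqConfig("int8")
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", device_map="cuda", quantization_config=eetq_config
)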
class BitNetHfQuantizer(HfQuantizer):
"""
1.58-bit quantization from the BitNet quantization method:
Before loading: converts the linear layers into BitLinear layers.
Check out the paper introducing this method: https://arxiv.org/pdf/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif device_map is not None:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
device_map,
keep_in_fp32_modules: List[str] = [],
**kwargs,
):
from ..integrations import get_keys_to_not_convert, replace_with_bitnet_linear
self.modules_to_not_convert = get_keys_to_not_convert(model)
if self.quantization_config.modules_to_not_convert is not None:
self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return False
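# Hedged usage sketch (not part of quantizer_bitnet.py): since `requires_calibration = True`, only
# checkpoints already quantized with BitNet can be loaded, and the quantization config is expected
# to come from the checkpoint itself. The repo id below is purely illustrative.
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-bitnet-1.58bit-checkpoint",  # illustrative prequantized BitNet repo
    device_map="auto",
)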
class AqlmHfQuantizer(HfQuantizer):
"""
Quantizer of the AQLM method. Enables the loading of prequantized models.
"""
requires_calibration = True
required_packages = ["aqlm"]
optimum_quantizer = None
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`")
if not is_aqlm_available():
raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`")
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
if torch.cuda.is_available():
torch_dtype = torch.float16
logger.info(
"CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually."
)
else:
torch_dtype = torch.float32
logger.info(
"CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually."
)
return torch_dtype
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
**kwargs,
):
replace_with_aqlm_linear(
model,
quantization_config=self.quantization_config,
linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2")
if aqlm_supports_training:
return True
else:
logger.warning(
f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`"
)
return False
def is_serializable(self, safe_serialization=None):
return True
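# Hedged usage sketch (not part of quantizer_aqlm.py): AQLM also only loads prequantized checkpoints
# (`requires_calibration = True`). Per `update_torch_dtype` above, leaving `torch_dtype` unset picks
# float16 on CUDA and float32 on CPU. The repo id below is purely illustrative.
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-aqlm-2bit-checkpoint",  # illustrative prequantized AQLM repo
    device_map="auto",
)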
class HqqHfQuantizer(HfQuantizer):
"""
HQQ quantizer base HF class.
nn.Linear modules are first tagged with quant_config in _process_model_before_weight_loading().
The actual quantization and offloading to the GPU is done in create_quantized_param().
"""
use_keep_in_fp32_modules = False
requires_parameters_quantization = True
requires_calibration = False
required_packages = ["hqq"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.torch_dtype = None
self.using_multi_gpu = False
def validate_environment(self, *args, **kwargs):
if not (is_hqq_available()):
raise ImportError(
"A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`."
)
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Converting weights from tf/flax weights is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
if self.torch_dtype is None:
if "torch_dtype" in kwargs:
self.torch_dtype = kwargs["torch_dtype"]
else:
self.torch_dtype = torch.float32
logger.info("Setting torch_dtype to torch.float32 as the default value since it was not specified.")
device_map = kwargs.get("device_map", None)
if isinstance(device_map, dict):
if "cpu" in device_map.values() or "disk" in device_map.values():
raise ValueError(
"You are attempting to use an HQQ model with a device_map that contains a CPU or disk device."
" This is not supported. Please remove the CPU or disk device from the device_map."
)
else:
self.using_multi_gpu = len(set(device_map.values())) > 1
def update_missing_keys(
self, model: "PreTrainedModel", missing_keys: List[str], prefix: str, **kwargs
) -> List[str]:
if self.pre_quantized:
return [key for key in missing_keys if ("weight" not in key)]
else:
return missing_keys
# Adds missing keys for HQQLinear modules that are loaded, but the model was initialized with torch.nn.Linear
def update_expected_keys(
self, model: "PreTrainedModel", expected_keys: List[str], loaded_keys: List[str]
) -> List[str]:
if not self.pre_quantized:
return expected_keys
# Collects all quantizable (linear) layers
def _find_hqq_quantizable_layers(model, layers):
for name, module in model.named_children():
if isinstance(module, (torch.nn.Linear)):
layers.add(module.name)
_find_hqq_quantizable_layers(module, layers)
new_keys = set(expected_keys)
if is_hqq_available():
from hqq.core.quantize import HQQLinear
# Name modules
for name, module in model.named_modules():
module.name = name
# valid modules are Linear layers that have HQQLinear state_dict. We ignore skip_modules and any layers with Linear state_dict() params
_valid_modules = set()
_find_hqq_quantizable_layers(model, _valid_modules)
_valid_modules -= set(model.config.quantization_config["skip_modules"])
# Append new expected layers based on _ref_keys
_ref_keys = HQQLinear(
linear_layer=None, quant_config=None, compute_dtype=torch.float16, device="cpu"
).state_dict_keys() - {"bias"}
# Clean-up
_rm_keys = set()
for key in new_keys:
if any(_module in key for _module in _valid_modules):
_rm_keys.add(key)
new_keys -= _rm_keys
# At this point, new_keys contains all the keys of the layers that are NOT HQQLinear or torch.nn.Linear
# Re-populate Linear/HQQLinear
for _module in _valid_modules:
if _module + ".weight" in loaded_keys:
new_keys.add(_module + ".weight")
else:
new_keys.update({_module + "." + _ref_key for _ref_key in _ref_keys})
if _module + ".bias" in loaded_keys:
new_keys.add(_module + ".bias")
return list(new_keys)
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
if is_hqq_available():
from hqq.core.quantize import HQQLinear
module, tensor_name = get_module_from_name(model, param_name)
if self.pre_quantized:
return (
(isinstance(module, torch.nn.Linear) or isinstance(module, HQQLinear))
and tensor_name != "weight"
and tensor_name != "bias"
)
else:
return isinstance(module, torch.nn.Linear) and tensor_name == "weight"
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: List[str],
):
"""
Each nn.Linear layer is processed here.
We first check if the corresponding module state_dict already contains HQQ quantized parameters.
If not, we create a temporary linear layer from the module state_dict params and use it for quantization.
"""
if is_hqq_available():
from hqq.core.quantize import HQQLinear
module, tensor_name = get_module_from_name(model, param_name)
layer_name = ".".join(param_name.split(".")[:-1])
parent_module = find_parent(model, layer_name)
node = layer_name.split(".")[-1]
# set module state_dict
module_state_dict = {}
for k, v in state_dict.items():
if layer_name + "." in k:
module_state_dict[k.split(".")[-1]] = v
if unexpected_keys is not None and k in unexpected_keys:
unexpected_keys.remove(k)
if self.pre_quantized:
if isinstance(module, HQQLinear):
return
else:
hqq_layer = HQQLinear(
linear_layer=None,
quant_config=None,
compute_dtype=self.torch_dtype,
device=target_device,
)
hqq_layer.load_state_dict(module_state_dict)
if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor):
hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias)
if self.using_multi_gpu:
hqq_layer = self._patch_layer_for_multigpu(hqq_layer)
setattr(parent_module, node, hqq_layer)
# cleanup
del module.__dict__, module
torch.cuda.empty_cache()
return
# Step 1: populate module with weight/bias from module state dict
for key in module_state_dict:
setattr(module, key, torch.nn.Parameter(module_state_dict[key]))
# Step 2: Replace module with either HQQLinear or move it to device. We do this via setattr on the parent
# as doing it on the module directly doesn't work.
if hasattr(module, "quant_config"):
hqq_layer = HQQLinear(
module,
module.quant_config,
compute_dtype=self.torch_dtype,
device=target_device,
del_orig=True,
)
if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor):
hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias)
if self.using_multi_gpu:
hqq_layer = self._patch_layer_for_multigpu(hqq_layer)
setattr(parent_module, node, hqq_layer)
else:
module = module.to(dtype=self.torch_dtype, device=target_device)
setattr(parent_module, node, module)
torch.cuda.empty_cache()
# Removes the accelerate hook and uses a simpler forward pass. Otherwise, this breaks with multi-GPU.
def _patch_layer_for_multigpu(self, hqq_layer):
hqq_layer = remove_hook_from_module(hqq_layer)
def forward_with_device(self, x):
out = torch.matmul(x.to(self.device), self.dequantize().t())
if self.bias is not None:
out += self.bias
return out
hqq_layer.forward = lambda x: forward_with_device(hqq_layer, x)
return hqq_layer
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
device_map,
keep_in_fp32_modules: List[str] = None,
**kwargs,
):
keep_in_fp32_modules = keep_in_fp32_modules if keep_in_fp32_modules is not None else []
# Add the corresponding quant_config to each valid module. This allows us to do the actual nn.Linear -> HQQLinear conversion in create_quantized_param().
# prepare_for_hqq_linear() also sets the right quantization config inside the model (model.config.quantization_config) and the layers (hqq_layer.quant_config)
model = prepare_for_hqq_linear(model, quantization_config=self.quantization_config)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
model.is_hqq_quantized = True
model.is_hqq_serializable = self.is_serializable()
return model
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return True
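# Hedged usage sketch (not part of quantizer_hqq.py): in-flight HQQ quantization, assuming `HqqConfig`
# is exported by the installed transformers version and `hqq` (>= 0.2.1) is installed. A CUDA device is
# required by `validate_environment` above; the checkpoint id is illustrative.
import torch
from transformers import AutoModelForCausalLM, HqqConfig
hqq_config = HqqConfig(nbits=4, group_size=64)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",
    torch_dtype=torch.float16,
    device_map="cuda",
    quantization_config=hqq_config,
)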
class VptqHfQuantizer(HfQuantizer):
"""
Quantizer of the VPTQ method. Enables the loading of prequantized models.
"""
requires_calibration = True
required_packages = ["vptq"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Using `vptq` quantization requires Accelerate: `pip install accelerate`")
if not is_vptq_available():
raise ImportError("Using `vptq` quantization requires VPTQ>=0.0.4: `pip install -U vptq`")
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
if torch.cuda.is_available():
torch_dtype = torch.float16
logger.info(
"CUDA available. Assuming VPTQ inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually."
)
else:
import vptq
device_availability = getattr(vptq, "device_availability", lambda device: False)
if device_availability("cpu") is True:
raise RuntimeError("No GPU found. Please wait for the next release of VPTQ to use CPU inference")
torch_dtype = torch.float32
logger.info("No GPU found. Assuming VPTQ inference on CPU and loading the model in `torch.float32`.")
return torch_dtype
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
**kwargs,
):
"""
We don't have a parameter like modules_to_not_convert to indicate which layers should not be quantized,
because `quantization_config` includes the layers that should be quantized.
"""
from ..integrations import replace_with_vptq_linear
modules_to_not_convert = kwargs.get("modules_to_not_convert", []) + (
self.quantization_config.modules_to_not_convert or []
)
replace_with_vptq_linear(
model,
quantization_config=self.quantization_config,
modules_to_not_convert=modules_to_not_convert,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return False
def is_serializable(self, safe_serialization=None):
return True
class Bnb8BitHfQuantizer(HfQuantizer):
"""
8-bit quantization from bitsandbytes quantization method:
before loading: converts transformer layers into Linear8bitLt
during loading: loads the 16-bit weights and passes them to the layer object
after loading: quantizes individual weights in Linear8bitLt into 8-bit at the first .cuda() call
saving:
from state dict, as usual; saves weights and the 'SCB' component
loading:
need to locate the SCB component and pass it to the Linear8bitLt object
"""
use_keep_in_fp32_modules = True
requires_parameters_quantization = True
requires_calibration = False
required_packages = ["bitsandbytes", "accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
if self.quantization_config.llm_int8_skip_modules is not None:
self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError(
f"Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
)
if not is_bitsandbytes_available():
raise ImportError(
"Using `bitsandbytes` 8-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
)
from ..integrations import validate_bnb_backend_availability
from ..utils import is_bitsandbytes_multi_backend_available
bnb_multibackend_is_enabled = is_bitsandbytes_multi_backend_available()
validate_bnb_backend_availability(raise_exception=True)
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
" sure the weights are in PyTorch format."
)
device_map = kwargs.get("device_map", None)
if (
device_map is not None
and isinstance(device_map, dict)
and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
):
device_map_without_lm_head = {
key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
}
if set(device_map.values()) == {"cpu"} and bnb_multibackend_is_enabled:
pass
elif "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values():
raise ValueError(
"Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the "
"quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules "
"in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to "
"`from_pretrained`. Check "
"https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu "
"for more details. "
)
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"):
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 8bit inference and training"
" make sure you have the latest version of `bitsandbytes` installed"
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
# need more space for buffers that are created during quantization
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
# We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
logger.info(
"Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
"requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
"Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
" torch_dtype=torch.float16 to remove this warning.",
torch_dtype,
)
torch_dtype = torch.float16
return torch_dtype
def update_device_map(self, device_map):
if device_map is None:
if torch.cuda.is_available():
device_map = {"": torch.cuda.current_device()}
elif is_torch_xpu_available():
device_map = {"": f"xpu:{torch.xpu.current_device()}"}
else:
device_map = {"": "cpu"}
logger.info(
"The device_map was not initialized. "
f"Setting device_map to {device_map}. "
"If you want to use the model for inference, please set device_map ='auto' "
)
return device_map
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
if target_dtype != torch.int8:
logger.info("target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization")
return torch.int8
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
):
import bitsandbytes as bnb
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params):
if self.pre_quantized:
if param_name.replace("weight", "SCB") not in state_dict.keys():
raise ValueError("Missing quantization component `SCB`")
if param_value.dtype != torch.int8:
raise ValueError(
f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`."
)
return True
return False
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
"""
Combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device().
Needs aux items from state dicts; if found, removes them from unexpected_keys.
"""
import bitsandbytes as bnb
fp16_statistics_key = param_name.replace("weight", "SCB")
fp16_weights_format_key = param_name.replace("weight", "weight_format")
fp16_statistics = state_dict.get(fp16_statistics_key, None)
fp16_weights_format = state_dict.get(fp16_weights_format_key, None)
module, tensor_name = get_module_from_name(model, param_name)
if tensor_name not in module._parameters:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
old_value = getattr(module, tensor_name)
if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
if (
old_value.device == torch.device("meta")
and target_device not in ["meta", torch.device("meta")]
and param_value is None
):
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")
new_value = param_value.to("cpu")
if self.pre_quantized and not self.is_serializable():
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
)
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
if fp16_statistics is None:
new_value = new_value.T
kwargs = old_value.__dict__
new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device)
module._parameters[tensor_name] = new_value
if fp16_statistics is not None:
setattr(module.weight, "SCB", fp16_statistics.to(target_device))
if unexpected_keys is not None:
unexpected_keys.remove(fp16_statistics_key)
# We just need to pop the `weight_format` keys from the state dict to remove unneeded
# messages. The correct format is correctly retrieved during the first forward pass.
if fp16_weights_format is not None and unexpected_keys is not None:
unexpected_keys.remove(fp16_weights_format_key)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
model.is_loaded_in_8bit = True
model.is_8bit_serializable = self.is_serializable()
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
device_map,
keep_in_fp32_modules: List[str] = [],
**kwargs,
):
from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear
llm_int8_enable_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if self.quantization_config.llm_int8_skip_modules is None:
self.modules_to_not_convert = get_keys_to_not_convert(model)
else:
self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
if not isinstance(self.modules_to_not_convert, list):
self.modules_to_not_convert = [self.modules_to_not_convert]
self.modules_to_not_convert.extend(keep_in_fp32_modules)
# Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
if isinstance(device_map, dict) and len(device_map.keys()) > 1:
keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
if len(keys_on_cpu) > 0 and not llm_int8_enable_fp32_cpu_offload:
raise ValueError(
"If you want to offload some keys to `cpu` or `disk`, you need to set "
"`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
" converted to 8-bit but kept in 32-bit."
)
self.modules_to_not_convert.extend(keys_on_cpu)
model = replace_with_bnb_linear(
model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
)
# TODO: consider bringing the replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here
model.config.quantization_config = self.quantization_config
def is_serializable(self, safe_serialization=None):
_bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
"0.37.2"
)
if not _bnb_supports_8bit_serialization:
logger.warning(
"You are calling `save_pretrained` to a 8-bit converted model, but your `bitsandbytes` version doesn't support it. "
"If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or"
" unexpected behaviours."
)
return False
return True
@property
def is_trainable(self) -> bool:
return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0")
def _dequantize(self, model):
from ..integrations import dequantize_and_replace
model = dequantize_and_replace(
model, self.modules_to_not_convert, quantization_config=self.quantization_config
)
return model
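# Hedged usage sketch (not part of quantizer_bnb_8bit.py): 8-bit loading with bitsandbytes. Setting
# `llm_int8_enable_fp32_cpu_offload=True` lets modules dispatched to "cpu"/"disk" stay in fp32,
# matching the check in `_process_model_before_weight_loading` above. The checkpoint id is illustrative.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", device_map="auto", quantization_config=bnb_config
)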
class HfQuantizer(ABC):
"""
Abstract class for HuggingFace quantizers. For now, it supports quantizing HF transformers models for inference and/or quantization.
This class is used only for transformers.PreTrainedModel.from_pretrained and cannot easily be used outside the scope of that method
yet.
Attributes
quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
The quantization config that defines the quantization parameters of your model that you want to quantize.
modules_to_not_convert (`List[str]`, *optional*):
The list of module names to not convert when quantizing the model.
required_packages (`List[str]`, *optional*):
The list of required pip packages to install prior to using the quantizer
requires_calibration (`bool`):
Whether the quantization method requires calibrating the model before using it.
requires_parameters_quantization (`bool`):
Whether the quantization method requires creating a new Parameter. For example, for bitsandbytes, a new
xxxParameter must be created in order to properly quantize the model.
"""
requires_calibration = False
required_packages = None
requires_parameters_quantization = False
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
self.quantization_config = quantization_config
# -- Handle extra kwargs below --
self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
self.pre_quantized = kwargs.pop("pre_quantized", True)
if not self.pre_quantized and self.requires_calibration:
raise ValueError(
f"The quantization method {quantization_config.quant_method} does require the model to be pre-quantized."
f" You explicitly passed `pre_quantized=False` meaning your model weights are not quantized. Make sure to "
f"pass `pre_quantized=True` while knowing what you are doing."
)
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
"""
Some quantization methods require explicitly setting the dtype of the model to a
target dtype. Override this method if you want to make sure that behavior is
preserved.
Args:
torch_dtype (`torch.dtype`):
The input dtype that is passed in `from_pretrained`
"""
return torch_dtype
def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
"""
Override this method if you want to override the existing device map with a new
one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is
passed, the device_map is set to `"auto"`.
Args:
device_map (`Union[dict, str]`, *optional*):
The device_map that is passed through the `from_pretrained` method.
"""
return device_map
def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
"""
Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.
Args:
torch_dtype (`torch.dtype`, *optional*):
The torch_dtype that is used to compute the device_map.
"""
return torch_dtype
def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
"""
Override this method if you want to adjust the `missing_keys`.
Args:
missing_keys (`List[str]`, *optional*):
The list of missing keys in the checkpoint compared to the state dict of the model
"""
return missing_keys
def update_expected_keys(self, model, expected_keys: List[str], loaded_keys: List[str]) -> List[str]:
"""
Override this method if you want to adjust the `expected_keys`.
Args:
expected_keys (`List[str]`, *optional*):
The list of the expected keys in the initialized model.
loaded_keys (`List[str]`, *optional*):
The list of the loaded keys in the checkpoint.
"""
return expected_keys
def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
"""
returns dtypes for modules that are not quantized - used for the computation of the device_map in case
one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified
in `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
torch_dtype (`torch.dtype`):
The dtype passed in `from_pretrained` method.
"""
return {
name: torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in self.modules_to_not_convert)
}
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
"""adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
return max_memory
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
"""
Checks if a loaded state_dict component is part of a quantized param and performs some validation; only defined if
requires_parameters_quantization == True, for quantization methods that require creating new parameters
for quantization.
"""
return False
def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
"""
takes needed components from state_dict and creates quantized param; only applicable if
requires_parameters_quantization == True
"""
if not self.requires_parameters_quantization:
raise AttributeError(
f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
)
| 354 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/base.py
|
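For quantizers that set `requires_parameters_quantization = True`, the two hooks above cooperate during weight loading: `check_quantized_param` selects the tensors to intercept and `create_quantized_param` replaces them. A minimal sketch with a toy symmetric int8 scheme, assuming `get_module_from_name` is available from `transformers.quantizers.quantizers_utils`:

import torch

from transformers.quantizers.base import HfQuantizer
from transformers.quantizers.quantizers_utils import get_module_from_name  # assumed import path


class MyParamQuantizer(HfQuantizer):  # hypothetical subclass, for illustration only
    requires_parameters_quantization = True

    def check_quantized_param(self, model, param_value, param_name, state_dict, **kwargs) -> bool:
        # Only intercept the weights of nn.Linear modules.
        module, tensor_name = get_module_from_name(model, param_name)
        return isinstance(module, torch.nn.Linear) and tensor_name == "weight"

    def create_quantized_param(self, model, param_value, param_name, target_device, state_dict, unexpected_keys):
        # Toy symmetric int8 quantization: store int8 data plus a per-tensor scale.
        module, tensor_name = get_module_from_name(model, param_name)
        scale = param_value.abs().max() / 127.0
        q = (param_value / scale).round().clamp(-128, 127).to(torch.int8)
        module._parameters[tensor_name] = torch.nn.Parameter(q.to(target_device), requires_grad=False)
        module.register_buffer("weight_scale", scale.to(target_device))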
def validate_environment(self, *args, **kwargs):
"""
This method is used to check for potential conflicts with arguments that are
passed in `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers.
If no explicit check is needed, simply return nothing.
"""
return
def preprocess_model(self, model: "PreTrainedModel", **kwargs):
"""
Setting model attributes and/or converting the model before weight loading. At this point
the model should be initialized on the meta device so you can freely manipulate the skeleton
of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
| 354 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/base.py
|
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_before_weight_loading`.
"""
model.is_quantized = True
model.quantization_method = self.quantization_config.quant_method
return self._process_model_before_weight_loading(model, **kwargs)
def postprocess_model(self, model: "PreTrainedModel", **kwargs):
"""
Post-process the model after weight loading.
Make sure to override the abstract method `_process_model_after_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_after_weight_loading`.
"""
return self._process_model_after_weight_loading(model, **kwargs)
| 354 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/base.py
|
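Taken together, `preprocess_model` and `postprocess_model` bracket the weight-loading loop. The sketch below shows the hook order only; it is not the actual `from_pretrained` implementation, which additionally handles sharding, offloading and dtype casting:

def load_with_quantizer(model, state_dict, hf_quantizer, target_device="cuda:0"):
    # 1. Swap modules while the model is still on the meta device.
    hf_quantizer.preprocess_model(model)
    unexpected_keys = []
    for name, value in state_dict.items():
        # 2. Quantize the parameters the quantizer claims, load the rest normally.
        if hf_quantizer.check_quantized_param(model, value, name, state_dict):
            hf_quantizer.create_quantized_param(model, value, name, target_device, state_dict, unexpected_keys)
        else:
            pass  # regular loading path (omitted in this sketch)
    # 3. Finalize, e.g. repack weights or attach kernel workspaces.
    hf_quantizer.postprocess_model(model)
    return model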
def dequantize(self, model):
"""
Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
Note that not all quantization schemes support this.
"""
model = self._dequantize(model)
# Delete quantizer and quantization config
del model.hf_quantizer
del model.config.quantization_config
del model.config._pre_quantization_dtype
model.is_quantized = False
return model
def _dequantize(self, model):
raise NotImplementedError(
f"{self.quantization_config.quant_method} has no implementation of `dequantize`, please raise an issue on GitHub."
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return False
@abstractmethod
def _process_model_before_weight_loading(self, model, **kwargs): ...
| 354 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/base.py
|
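A hedged usage example of the dequantization path above, assuming a quantization method that implements `_dequantize` (bitsandbytes is used here purely as an illustration; the model id is arbitrary):

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Quantize a small model on the fly, then recover a dense floating-point model.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # illustrative model id
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
model = model.hf_quantizer.dequantize(model)  # only for schemes that implement `_dequantize`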
@abstractmethod
def _process_model_after_weight_loading(self, model, **kwargs): ...
@abstractmethod
def is_serializable(self, safe_serialization=None): ...
@property
@abstractmethod
def is_trainable(self): ...
| 354 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/base.py
|
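For reference, a minimal concrete subclass that satisfies the abstract interface above; it performs no real quantization and exists only to illustrate which methods must be implemented:

from transformers.quantizers.base import HfQuantizer


class NoOpQuantizer(HfQuantizer):  # hypothetical, for illustration only
    requires_calibration = False

    def validate_environment(self, *args, **kwargs):
        return  # nothing to check

    def _process_model_before_weight_loading(self, model, **kwargs):
        # A real quantizer would replace nn.Linear layers with quantized ones here.
        model.config.quantization_config = self.quantization_config

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model

    def is_serializable(self, safe_serialization=None):
        return True

    @property
    def is_trainable(self):
        return False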
class HiggsHfQuantizer(HfQuantizer):
"""
Quantizer of the HIGGS method. Enables the loading of prequantized models and in-flight quantization of full-precision models.
"""
requires_calibration = False
requires_parameters_quantization = True
required_packages = ["flute-kernel", "fast_hadamard_transform"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, device_map, **kwargs):
if not torch.cuda.is_available():
raise NotImplementedError("HIGGS quantization is only supported on GPU. Please use a different quantizer.")
if not is_accelerate_available():
raise ImportError("Using `higgs` quantization requires Accelerate: `pip install accelerate`")
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
if not is_flute_available():
raise ImportError("Using `higgs` quantization requires FLUTE: `pip install flute-kernel>=0.3.0`")
if not is_hadamard_available():
raise ImportError(
"Using `higgs` quantization requires fast_hadamard_transform: `pip install fast_hadamard_transform`"
)
if device_map is None:
raise ValueError(
"You are attempting to load a HIGGS model without setting device_map."
" Please set device_map comprised of 'cuda' devices."
)
elif isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a HIGGS model with a device_map that contains a CPU or disk device."
" This is not supported. Please remove the CPU or disk device from the device_map."
)
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
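A hedged loading example that respects the constraints checked above (CUDA-only device_map, fp16/bf16 compute dtype); `HiggsConfig` and its arguments are assumed to be the config class that pairs with this quantizer, and the model id is illustrative:

import torch
from transformers import AutoModelForCausalLM, HiggsConfig  # HiggsConfig assumed available

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B-Instruct",       # illustrative model id
    quantization_config=HiggsConfig(bits=4),  # illustrative settings
    device_map="cuda",
    torch_dtype=torch.bfloat16,
)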
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
logger.info("`torch_dtype` is None. Setting `torch_dtype=torch.float16` for FLUTE compatibility.")
torch_dtype = torch.float16
elif torch_dtype != torch.float16 and torch_dtype != torch.bfloat16:
raise ValueError(
f"Invalid `torch_dtype` {torch_dtype}. HIGGS quantization only supports `torch_dtype=torch.float16` or `torch_dtype=torch.bfloat16`."
)
return torch_dtype
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
from ..integrations import quantize_with_higgs
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
"""
Quantizes weights into weight and weight_scale
"""
flute_dict = quantize_with_higgs(
param_value.to(target_device),
self.quantization_config.bits,
self.quantization_config.p,
self.quantization_config.group_size,
self.quantization_config.hadamard_size,
)
del param_value
module, tensor_name = get_module_from_name(model, param_name)
for key, value in flute_dict.items():
if key in module._parameters:
module._parameters[key] = torch.nn.Parameter(value, requires_grad=False)
elif key in module._buffers:
module._buffers[key] = torch.nn.Buffer(value)
else:
raise ValueError(f"Unexpected key {key} in module {module}")
if unexpected_keys is not None and param_name in unexpected_keys:
unexpected_keys.remove(param_name)
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
module.num_sms_packed = torch.nn.Parameter(
torch.tensor(get_num_sms_from_device(target_device), device=target_device, dtype=torch.int32),
requires_grad=False,
)
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
**kwargs,
):
from ..integrations import replace_with_higgs_linear
replace_with_higgs_linear(
model,
quantization_config=self.quantization_config,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
import flute.utils
from ..integrations import HiggsLinear
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
flute_workspaces = {}
for name, module in model.named_modules():
if isinstance(module, HiggsLinear):
# Every HiggsLinear needs a "workspace": a buffer for the unpacking operation.
# This buffer needs to be on the same device as the weights, but can be reused across modules otherwise.
if module.weight.device not in flute_workspaces:
flute_workspaces[module.weight.device] = flute.utils.make_workspace_streamk(
device=module.weight.device
)
module.workspace = flute_workspaces[module.weight.device]
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
# FLUTE weights are packed in a way that is optimized for a specific number of SMs (GPU streaming multiprocessors).
# If the model is loaded on a different device than the one it was saved on, we need to repack the weights.
if module.num_sms_packed.item() != get_num_sms_from_device(module.weight.device):
new_device = module.weight.device
new_num_sms = get_num_sms_from_device(new_device)
module.weight.data = flute.utils.pack(
flute.utils.unpack(
weight=module.weight.data,
scales=module.scales.data,
workspace=module.workspace,
num_bits=module.num_bits,
group_size=module.group_size,
num_sms_packed=module.num_sms_packed.item(),
).T.contiguous(),
module.num_bits,
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
module.group_size,
)
module.num_sms_packed = torch.nn.Parameter(
torch.tensor(new_num_sms, device=new_device, dtype=torch.int32),
requires_grad=False,
)
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
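The repacking above hinges on the streaming-multiprocessor (SM) count of the GPU the weights end up on. A small sketch of how such a count can be queried with plain PyTorch, which is presumably what `get_num_sms_from_device` builds on (it may additionally clamp or map the value per architecture for FLUTE):

import torch

def num_sms(device: torch.device) -> int:
    # Number of streaming multiprocessors of a CUDA device.
    return torch.cuda.get_device_properties(device).multi_processor_count

if torch.cuda.is_available():
    print(num_sms(torch.device("cuda:0")))  # e.g. 108 on an A100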
def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
from ..integrations import HiggsLinear
not_missing_keys = []
for name, module in model.named_modules():
if isinstance(module, HiggsLinear):
for missing in missing_keys:
if (
(name in missing or name in f"{prefix}.{missing}")
and not missing.endswith(".weight")
and not missing.endswith(".bias")
):
not_missing_keys.append(missing)
return [k for k in missing_keys if k not in not_missing_keys]
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return False
def is_serializable(self, safe_serialization=None):
return True
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
from ..integrations import HiggsLinear
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module, HiggsLinear) and tensor_name == "weight" and param_value.dtype != torch.int16:
# Only quantize weights of HiggsLinear modules that are not already quantized
return True
else:
return False
def _dequantize(self, model):
from ..integrations import dequantize_higgs
model = dequantize_higgs(model)
return model
| 355 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/quantizer_higgs.py
|
class AutoQuantizationConfig:
"""
The Auto-HF quantization config class that takes care of automatically dispatching to the correct
quantization config given a quantization config stored in a dictionary.
"""
@classmethod
def from_dict(cls, quantization_config_dict: Dict):
quant_method = quantization_config_dict.get("quant_method", None)
# We need special care for bnb models to make sure everything is backward compatible ..
if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
elif quant_method is None:
raise ValueError(
"The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
)
| 356 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
|
if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
raise ValueError(
f"Unknown quantization type, got {quant_method} - supported types are:"
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
)
target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
return target_cls.from_dict(quantization_config_dict)
| 356 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
|
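A short usage example of the dispatch logic above; the legacy bitsandbytes path relies only on the `load_in_*` flags, while any other dict must carry a `quant_method` key that is looked up in `AUTO_QUANTIZATION_CONFIG_MAPPING`:

from transformers.quantizers.auto import AutoQuantizationConfig

# Legacy bitsandbytes dict without `quant_method`: dispatched via the load_in_* flags.
config = AutoQuantizationConfig.from_dict({"load_in_4bit": True})
print(type(config).__name__)  # BitsAndBytesConfig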
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
if getattr(model_config, "quantization_config", None) is None:
raise ValueError(
f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
)
quantization_config_dict = model_config.quantization_config
quantization_config = cls.from_dict(quantization_config_dict)
# Update with potential kwargs that are passed through from_pretrained.
quantization_config.update(**kwargs)
return quantization_config
| 356 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
|
class AutoHfQuantizer:
"""
The Auto-HF quantizer class that takes care of automatically instantiating to the correct
`HfQuantizer` given the `QuantizationConfig`.
"""
@classmethod
def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
# Convert it to a QuantizationConfig if the q_config is a dict
if isinstance(quantization_config, dict):
quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
quant_method = quantization_config.quant_method
# Again, we need special care for bnb as we have a single quantization config
# class for both 4-bit and 8-bit quantization
if quant_method == QuantizationMethod.BITS_AND_BYTES:
if quantization_config.load_in_8bit:
quant_method += "_8bit"
else:
quant_method += "_4bit"
| 357 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
|
if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
raise ValueError(
f"Unknown quantization type, got {quant_method} - supported types are:"
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
)
target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
return target_cls(quantization_config, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
return cls.from_config(quantization_config)
| 357 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
|
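A hedged example of the dispatch performed by `from_config`; the printed class name reflects the bitsandbytes split into 8-bit and 4-bit quantizers described above:

from transformers import BitsAndBytesConfig
from transformers.quantizers.auto import AutoHfQuantizer

quantizer = AutoHfQuantizer.from_config(BitsAndBytesConfig(load_in_8bit=True))
print(type(quantizer).__name__)  # e.g. Bnb8BitHfQuantizer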
@classmethod
def merge_quantization_configs(
cls,
quantization_config: Union[dict, QuantizationConfigMixin],
quantization_config_from_args: Optional[QuantizationConfigMixin],
):
"""
Handles situations where both a quantization_config from args and a quantization_config from the model config are present.
"""
if quantization_config_from_args is not None:
warning_msg = (
"You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
" already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
)
else:
warning_msg = ""
if isinstance(quantization_config, dict):
quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
| 357 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/quantizers/auto.py
|