Columns: text (string, 1 to 1.02k characters), class_index (int64, 0 to 10.8k), source (string, 85 to 188 characters). Every chunk below has class_index 230 and source /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py.
        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None

        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
        # because the shape of the new embedding layer is used across various modeling files
        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init, leading
        # to errors when training.
        new_lm_head = nn.Linear(
            *new_lm_head_shape,
            bias=has_new_lm_head_bias,
            device=old_lm_head.weight.device,
            dtype=old_lm_head.weight.dtype,
        )

        if new_num_tokens > old_num_tokens and not mean_resizing:
            # initialize new embeddings (in particular added tokens) with a mean of 0 and std equal to `config.initializer_range`.
            self._init_weights(new_lm_head)
        elif new_num_tokens > old_num_tokens and mean_resizing:
            # initialize new lm_head weights (in particular added tokens). The new lm_head weights
            # will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance,
            # as described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
            logger.warning_once(
                "The new lm_head weights will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. "
                "As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. "
                "To disable this, use `mean_resizing=False`"
            )

            added_num_tokens = new_num_tokens - old_num_tokens
            if is_deepspeed_zero3_enabled() and not is_quantized:
                import deepspeed
                params = [old_lm_head.weight]
                if has_new_lm_head_bias:
                    params += [old_lm_head.bias]
                with deepspeed.zero.GatheredParameters(params, modifier_rank=None):
                    self._init_added_lm_head_weights_with_mean(
                        old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed
                    )
                    if has_new_lm_head_bias:
                        self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens)
            else:
                self._init_added_lm_head_weights_with_mean(
                    old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens, transposed
                )
                if has_new_lm_head_bias:
                    self._init_added_lm_head_bias_with_mean(old_lm_head, new_lm_head, added_num_tokens)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                self._copy_lm_head_original_to_resized(
                    new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias
                )
        else:
            self._copy_lm_head_original_to_resized(
                new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias
            )

        return new_lm_head
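# Illustrative usage sketch (not part of modeling_utils.py): the resized lm head above is normally
# reached through the public `resize_token_embeddings` API after new tokens are added to the
# tokenizer. The checkpoint name and added token are placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer.add_tokens(["<my_new_token>"])
# mean_resizing=True triggers the multivariate-normal initialization described in the code above.
model.resize_token_embeddings(len(tokenizer), mean_resizing=True)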
    def _init_added_embeddings_weights_with_mean(
        self, old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens
    ):
        old_embeddings_weight = old_embeddings.weight.data.to(torch.float32)
        mean_embeddings = torch.mean(old_embeddings_weight, axis=0)
        old_centered_embeddings = old_embeddings_weight - mean_embeddings
        covariance = old_centered_embeddings.T @ old_centered_embeddings / old_num_tokens
        # Check if the covariance is positive definite.
        eigenvalues = torch.linalg.eigvals(covariance)
        is_covariance_psd = bool(
            (covariance == covariance.T).all() and not torch.is_complex(eigenvalues) and (eigenvalues > 0).all()
        )
        if is_covariance_psd:
            # If the covariance is positive definite, a distribution can be created and we can sample new weights from it.
            distribution = torch.distributions.multivariate_normal.MultivariateNormal(
                mean_embeddings, covariance_matrix=1e-9 * covariance
            )
            new_embeddings.weight.data[-1 * added_num_tokens :, :] = distribution.sample(
                sample_shape=(added_num_tokens,)
            ).to(old_embeddings.weight.dtype)
        else:
            # Otherwise, just initialize with the mean, because the distribution will not be created.
            new_embeddings.weight.data[-1 * added_num_tokens :, :] = (
                mean_embeddings[None, :].repeat(added_num_tokens, 1).to(old_embeddings.weight.dtype)
            )
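# Standalone sketch of the mean/covariance initialization used above (illustrative; it operates on a
# toy random matrix rather than a real model's embedding weights):
import torch

old_weight = torch.randn(1000, 64)  # pretend these are the old embedding rows
mean = old_weight.mean(dim=0)
centered = old_weight - mean
covariance = centered.T @ centered / old_weight.shape[0]
# The 1e-9 scaling mirrors the code above: it keeps the samples tightly clustered around the mean.
distribution = torch.distributions.MultivariateNormal(mean, covariance_matrix=1e-9 * covariance)
new_rows = distribution.sample(sample_shape=(4,))  # rows for 4 newly added tokens
print(new_rows.shape)  # torch.Size([4, 64])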
    def _init_added_lm_head_weights_with_mean(
        self,
        old_lm_head,
        new_lm_head,
        old_lm_head_dim,
        old_num_tokens,
        added_num_tokens,
        transposed=False,
    ):
        if transposed:
            # Transpose to the desired shape for the function.
            new_lm_head.weight.data = new_lm_head.weight.data.T
            old_lm_head.weight.data = old_lm_head.weight.data.T

        # The same initialization logic as Embeddings.
        self._init_added_embeddings_weights_with_mean(
            old_lm_head, new_lm_head, old_lm_head_dim, old_num_tokens, added_num_tokens
        )

        if transposed:
            # Transpose again to the correct shape.
            new_lm_head.weight.data = new_lm_head.weight.data.T
            old_lm_head.weight.data = old_lm_head.weight.data.T
    def _init_added_lm_head_bias_with_mean(self, old_lm_head, new_lm_head, added_num_tokens):
        bias_mean = torch.mean(old_lm_head.bias.data, axis=0, dtype=torch.float32)
        bias_std = torch.std(old_lm_head.bias.data, axis=0).to(torch.float32)
        new_lm_head.bias.data[-1 * added_num_tokens :].normal_(mean=bias_mean, std=1e-9 * bias_std)

    def _copy_lm_head_original_to_resized(
        self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias
    ):
        # Copy old lm head weights to new lm head
        if not transposed:
            new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
        else:
            new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]

        # Copy bias weights to new lm head
        if has_new_lm_head_bias:
            new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
    def resize_position_embeddings(self, new_num_position_embeddings: int):
        raise NotImplementedError(
            f"`resize_position_embeddings` is not implemented for {self.__class__}. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
        raise NotImplementedError(
            f"`get_position_embeddings` is not implemented for {self.__class__}. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )
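# Hedged sketch of what an override of the two hooks above could look like for a toy module with
# learned absolute position embeddings (purely illustrative; real models define their own sizes,
# copy rules, and config updates):
import torch.nn as nn

class ToyModelWithPositions(nn.Module):
    def __init__(self, hidden_size: int = 64, max_positions: int = 512):
        super().__init__()
        self.position_embeddings = nn.Embedding(max_positions, hidden_size)

    def get_position_embeddings(self) -> nn.Embedding:
        return self.position_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        old = self.position_embeddings
        new = nn.Embedding(new_num_position_embeddings, old.embedding_dim)
        # Copy over as many of the old position vectors as fit in the new table.
        num_to_copy = min(old.num_embeddings, new_num_position_embeddings)
        new.weight.data[:num_to_copy] = old.weight.data[:num_to_copy]
        self.position_embeddings = new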
    def init_weights(self):
        """
        If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement
        any initialization logic in `_init_weights`.
        """
        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        if _init_weights:
            # Initialize weights
            self.apply(self._initialize_weights)

            # Tie weights should be skipped when not initializing all weights
            # since from_pretrained(...) calls tie weights anyways
            self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.
        Arguments:
            heads_to_prune (`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads
                to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on
                layer 1 and heads 2 and 3 on layer 2.
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)

    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
        """
        Activates gradient checkpointing for the current model.
        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".

        We pass the `__call__` method of the modules instead of `forward` because `__call__` attaches all the hooks of
        the module. https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2

        Args:
            gradient_checkpointing_kwargs (dict, *optional*):
                Additional keyword arguments passed along to the `torch.utils.checkpoint.checkpoint` function.
        """
        if not self.supports_gradient_checkpointing:
            raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")

        if gradient_checkpointing_kwargs is None:
            gradient_checkpointing_kwargs = {"use_reentrant": True}

        gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs)
        # For old GC format (transformers < 4.35.0) for models that live on the Hub
        # we will fall back to the overwritten `_set_gradient_checkpointing` method
        _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters

        if not _is_using_old_format:
            self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)
        else:
            self.apply(partial(self._set_gradient_checkpointing, value=True))
            logger.warning(
                "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it). "
                "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
            )
        if getattr(self, "_hf_peft_config_loaded", False):
            # When using PEFT + gradient checkpointing + Trainer we need to make sure the input has requires_grad=True
            # we do it also on PEFT: https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334
            # When training with PEFT, only LoRA layers will have requires_grad set to True, but the output of frozen
            # layers needs to propagate the gradients to make sure the gradient flows.
            self.enable_input_require_grads()

    def _set_gradient_checkpointing(self, enable: bool = True, gradient_checkpointing_func: Callable = checkpoint):
        is_gradient_checkpointing_set = False
        # Apply it on the top-level module in case the top-level module supports it,
        # for example, LongT5Stack inherits from `PreTrainedModel`.
        if hasattr(self, "gradient_checkpointing"):
            self._gradient_checkpointing_func = gradient_checkpointing_func
            self.gradient_checkpointing = enable
            is_gradient_checkpointing_set = True

        for module in self.modules():
            if hasattr(module, "gradient_checkpointing"):
                module._gradient_checkpointing_func = gradient_checkpointing_func
                module.gradient_checkpointing = enable
                is_gradient_checkpointing_set = True

        if not is_gradient_checkpointing_set:
            raise ValueError(
                f"{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure the architecture supports it by setting a boolean attribute"
                " `gradient_checkpointing` on the modules of the model that use checkpointing."
            )
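# Usage sketch (illustrative; the checkpoint name is a placeholder): enabling non-reentrant gradient
# checkpointing via the public API defined above, then turning it back off.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
assert model.is_gradient_checkpointing
model.gradient_checkpointing_disable()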
    def gradient_checkpointing_disable(self):
        """
        Deactivates gradient checkpointing for the current model.
        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        if self.supports_gradient_checkpointing:
            # For old GC format (transformers < 4.35.0) for models that live on the Hub
            # we will fall back to the overwritten `_set_gradient_checkpointing` method
            _is_using_old_format = "value" in inspect.signature(self._set_gradient_checkpointing).parameters
            if not _is_using_old_format:
                self._set_gradient_checkpointing(enable=False)
            else:
                logger.warning(
                    "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it). "
"Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model." ) self.apply(partial(self._set_gradient_checkpointing, value=False))
        if getattr(self, "_hf_peft_config_loaded", False):
            self.disable_input_require_grads()

    @property
    def is_gradient_checkpointing(self) -> bool:
        """
        Whether gradient checkpointing is activated for this model or not.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
        push_to_hub: bool = False,
        max_shard_size: Union[int, str] = "5GB",
        safe_serialization: bool = True,
        variant: Optional[str] = None,
        token: Optional[Union[str, bool]] = None,
        save_peft_format: bool = True,
        **kwargs,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        [`~PreTrainedModel.from_pretrained`] class method.
        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training (e.g.
                on TPUs), when this function needs to be called on all processes. In this case, set
                `is_main_process=True` only on the main process to avoid race conditions.
            state_dict (nested dictionary of `torch.Tensor`):
                The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only
                save parts of the model or if special precautions need to be taken when recovering the state dictionary
                of a model (like when using model parallelism).
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training (e.g. on TPUs)
                when one needs to replace `torch.save` by another method.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`):
                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller than
                this size. If expressed as a string, it needs to be digits followed by a unit (like `"5MB"`). We
                default it to 5GB so that models can run easily on free-tier Google Colab instances without CPU OOM
                issues.
                <Tip warning={true}>

                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
                which will be bigger than `max_shard_size`.

                </Tip>
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
            variant (`str`, *optional*):
                If specified, weights are saved in the format pytorch_model.<variant>.bin.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
            save_peft_format (`bool`, *optional*, defaults to `True`):
                For backward compatibility with the PEFT library, in case adapter weights are attached to the model,
                all keys of the state dict of adapters need to be pre-pended with `base_model.model`. Advanced users
                can disable this behaviour by setting `save_peft_format` to `False`.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        use_auth_token = kwargs.pop("use_auth_token", None)
        ignore_metadata_errors = kwargs.pop("ignore_metadata_errors", False)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            kwargs["token"] = token

        _hf_peft_config_loaded = getattr(self, "_hf_peft_config_loaded", False)

        hf_quantizer = getattr(self, "hf_quantizer", None)
        quantization_serializable = (
            hf_quantizer is not None
            and isinstance(hf_quantizer, HfQuantizer)
            and hf_quantizer.is_serializable(safe_serialization=safe_serialization)
        )
        if hf_quantizer is not None and not _hf_peft_config_loaded and not quantization_serializable:
            raise ValueError(
                f"The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from"
                " the logger on the traceback to understand the reason why the quantized model is not serializable."
            )

        if "save_config" in kwargs:
            warnings.warn(
                "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead."
            )
            is_main_process = kwargs.pop("save_config")
        if safe_serialization and not is_safetensors_available():
            raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.")

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = get_parameter_dtype(model_to_save)
        model_to_save.config.torch_dtype = str(dtype).split(".")[1]

        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]
        # Unset attn implementation so it can be set to another one when loading back
        model_to_save.config._attn_implementation_autoset = False

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)
        # Save the config
        if is_main_process:
            if not _hf_peft_config_loaded:
                # If the model config has set attributes that should be in the generation config, move them there.
                misplaced_generation_parameters = model_to_save.config._get_non_default_generation_parameters()
                if self.can_generate() and len(misplaced_generation_parameters) > 0:
                    warnings.warn(
                        "Moving the following attributes in the config to the generation config: "
                        f"{misplaced_generation_parameters}. You are seeing this warning because you've set "
                        "generation parameters in the model config, as opposed to in the generation config.",
                        UserWarning,
                    )
                    for param_name, param_value in misplaced_generation_parameters.items():
                        setattr(model_to_save.generation_config, param_name, param_value)
                        setattr(model_to_save.config, param_name, None)
                model_to_save.config.save_pretrained(save_directory)
            if self.can_generate():
                model_to_save.generation_config.save_pretrained(save_directory)

        if _hf_peft_config_loaded:
            logger.info(
                "Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved."
            )
            state_dict = model_to_save.get_adapter_state_dict()

            if save_peft_format:
                logger.info(
                    "To match the expected format of the PEFT library, all keys of the state dict of adapters will be pre-pended with `base_model.model`."
                )
                peft_state_dict = {}
                for key, value in state_dict.items():
                    peft_state_dict[f"base_model.model.{key}"] = value
                state_dict = peft_state_dict

            active_adapter = self.active_adapters()
            if len(active_adapter) > 1:
                raise ValueError(
                    "Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one "
                    "by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`"
                )
            active_adapter = active_adapter[0]

            current_peft_config = self.peft_config[active_adapter]
            current_peft_config.save_pretrained(save_directory)

        # for offloaded modules
        module_map = {}
        # Save the model
        if state_dict is None:
            # if any model parameters are offloaded, make module map
            if (
                hasattr(self, "hf_device_map")
                and len(set(self.hf_device_map.values())) > 1
                and ("cpu" in self.hf_device_map.values() or "disk" in self.hf_device_map.values())
            ):
                warnings.warn(
                    "Attempting to save a model with offloaded modules. Ensure that unallocated cpu memory exceeds the `shard_size` (5GB default)"
                )
                for name, module in model_to_save.named_modules():
                    if name == "":
                        continue
                    module_state_dict = module.state_dict()

                    for key in module_state_dict:
                        module_map[name + f".{key}"] = module
            state_dict = model_to_save.state_dict()
        # Translate state_dict from smp to hf if saving with smp >= 1.10
        if IS_SAGEMAKER_MP_POST_1_10:
            for smp_to_hf, _ in smp.state.module_manager.translate_functions:
                state_dict = smp_to_hf(state_dict)

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            for ignore_key in self._keys_to_ignore_on_save:
                if ignore_key in state_dict.keys():
                    del state_dict[ignore_key]

        # Rename state_dict keys before saving to file. Do nothing unless overridden in a particular model.
        # (initially introduced with TimmWrapperModel to remove prefix and make checkpoints compatible with timm)
        state_dict = self._fix_state_dict_keys_on_save(state_dict)
        if safe_serialization:
            # Safetensors does not allow tensor aliasing.
            # We're going to remove aliases before saving
            ptrs = collections.defaultdict(list)
            for name, tensor in state_dict.items():
                # Sometimes in the state_dict we have non-tensor objects.
                # e.g. in bitsandbytes we have some `str` objects in the state_dict
                if isinstance(tensor, torch.Tensor):
                    ptrs[id_tensor_storage(tensor)].append(name)
                else:
                    # In the non-tensor case, fall back to the pointer of the object itself
                    ptrs[id(tensor)].append(name)
            # These are all the pointers of shared tensors
            if hasattr(self, "hf_device_map"):
                # if the model has offloaded parameters, we must check using find_tied_parameters()
                tied_params = find_tied_parameters(self)
                if tied_params:
                    tied_names = tied_params[0]
                    shared_ptrs = {
                        ptr: names for ptr, names in ptrs.items() if any(name in tied_names for name in names)
                    }
                else:
                    shared_ptrs = {}
            else:
                shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
            # Recursively descend to find tied weight keys
            _tied_weights_keys = _get_tied_weight_keys(self)
            error_names = []
            to_delete_names = set()
            for names in shared_ptrs.values():
                # Removing the keys which are declared as known duplicates on
                # load. This allows to make sure the name which is kept is consistent.
                if _tied_weights_keys is not None:
                    found = 0
                    for name in sorted(names):
                        matches_pattern = any(re.search(pat, name) for pat in _tied_weights_keys)
                        if matches_pattern and name in state_dict:
                            found += 1
                            if found < len(names):
                                to_delete_names.add(name)
            # We are entering a place where the weights and the transformers configuration do NOT match.
            shared_names, disjoint_names = _find_disjoint(shared_ptrs.values(), state_dict)
            # Those are actually tensor sharing but disjoint from each other, we can safely clone them
            # Reloaded won't have the same property, but it shouldn't matter in any meaningful way.
            for name in disjoint_names:
                state_dict[name] = state_dict[name].clone()
            # When not all duplicates have been cleaned, still remove those keys, but put a clear warning.
            # If the link between tensors was done at runtime then `from_pretrained` will not get
            # the key back leading to random tensor. A proper warning will be shown
            # during reload (if applicable), but since the file is not necessarily compatible with
            # the config, better show a proper warning.
            shared_names, identical_names = _find_identical(shared_names, state_dict)
            # delete tensors that have identical storage
            for inames in identical_names:
                known = inames.intersection(to_delete_names)
                for name in known:
                    del state_dict[name]
                unknown = inames.difference(to_delete_names)
                if len(unknown) > 1:
                    error_names.append(unknown)

            if shared_names:
                error_names.append(set(shared_names))
            if len(error_names) > 0:
                raise RuntimeError(
                    f"The weights trying to be saved contained shared tensors {error_names} that are mismatching the transformers base configuration. Try saving using `safe_serialization=False` or remove this tensor sharing.",
                )

        # Shard the model if it is too big.
        if not _hf_peft_config_loaded:
            weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
            weights_name = _add_variant(weights_name, variant)
        else:
            weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(
            state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
        )
        # Save index if sharded
        index = None
        if state_dict_split.is_sharded:
            index = {
                "metadata": state_dict_split.metadata,
                "weight_map": state_dict_split.tensor_to_filename,
            }

        # Clean the folder from a previous save
        for filename in os.listdir(save_directory):
            full_filename = os.path.join(save_directory, filename)
            # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
            # in distributed settings to avoid race conditions.
            weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
            # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
            filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "")
            reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
            if (
                filename.startswith(weights_no_suffix)
                and os.path.isfile(full_filename)
                and filename not in state_dict_split.filename_to_tensors.keys()
                and is_main_process
                and reg.fullmatch(filename_no_suffix) is not None
            ):
                os.remove(full_filename)

        # Save the model
        filename_to_tensors = state_dict_split.filename_to_tensors.items()
        if module_map:
            filename_to_tensors = logging.tqdm(filename_to_tensors, desc="Saving checkpoint shards")
        for shard_file, tensors in filename_to_tensors:
            shard = {}
            for tensor in tensors:
                shard[tensor] = state_dict[tensor].contiguous()
                # delete reference, see https://github.com/huggingface/transformers/pull/34890
                del state_dict[tensor]
            # remake shard with onloaded parameters if necessary
            if module_map:
                if accelerate_version < version.parse("0.31"):
                    raise ImportError(
                        f"You need an accelerate version greater than or equal to 0.31 to save models with offloaded parameters. Detected version {accelerate_version}. "
                        f"Please upgrade accelerate with `pip install -U accelerate`"
                    )
                # init state_dict for this shard
                shard_state_dict = {name: "" for name in shard}
                for module_name in shard:
                    module = module_map[module_name]
                    # update state dict with onloaded parameters
                    shard_state_dict = get_state_dict_from_offload(module, module_name, shard_state_dict)

                # assign shard to be the completed state dict
                shard = shard_state_dict
                del shard_state_dict
                gc.collect()
            if safe_serialization:
                # At some point we will need to deal better with save_function (used for TPU and other distributed
                # joyfulness), but for now this is enough.
                safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"})
            else:
                save_function(shard, os.path.join(save_directory, shard_file))

        del state_dict
        if index is None:
            path_to_weights = os.path.join(save_directory, weights_name)
            logger.info(f"Model weights saved in {path_to_weights}")
        else:
            save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
            save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant))
            # Save the index as well
            with open(save_index_file, "w", encoding="utf-8") as f:
                content = json.dumps(index, indent=2, sort_keys=True) + "\n"
                f.write(content)
            logger.info(
                f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
                f"split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameter has been saved in the "
                f"index located at {save_index_file}."
            )
        if push_to_hub:
            # Eventually create an empty model card
            model_card = create_and_tag_model_card(
                repo_id, self.model_tags, token=token, ignore_metadata_errors=ignore_metadata_errors
            )

            # Update model card if needed:
            model_card.save(os.path.join(save_directory, "README.md"))

            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=token,
            )

    @wraps(PushToHubMixin.push_to_hub)
    def push_to_hub(self, *args, **kwargs):
        tags = self.model_tags if self.model_tags is not None else []

        tags_kwargs = kwargs.get("tags", [])
        if isinstance(tags_kwargs, str):
            tags_kwargs = [tags_kwargs]

        for tag in tags_kwargs:
            if tag not in tags:
                tags.append(tag)
        if tags:
            kwargs["tags"] = tags
        return super().push_to_hub(*args, **kwargs)

    def get_memory_footprint(self, return_buffers=True):
        r"""
        Get the memory footprint of a model. This will return the memory footprint of the current model in bytes.
        Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the
        PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2
        Arguments:
            return_buffers (`bool`, *optional*, defaults to `True`):
                Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers
                are tensors that do not require gradients and are not registered as parameters, e.g. the mean and std
                in batch norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
        """
        mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
        if return_buffers:
            mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
            mem = mem + mem_bufs
        return mem
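# Usage sketch tying the methods above together (illustrative; the checkpoint name and output
# directory are placeholders, and pushing to the Hub assumes you are logged in with a valid token):
from transformers import AutoModel

model = AutoModel.from_pretrained("google-bert/bert-base-uncased")
print(f"Memory footprint: {model.get_memory_footprint() / 1e6:.1f} MB")
model.save_pretrained("./my_model", safe_serialization=True, max_shard_size="2GB")
# model.push_to_hub("my-username/my-model")  # uncomment to upload the saved weights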
    @wraps(torch.nn.Module.cuda)
    def cuda(self, *args, **kwargs):
        if getattr(self, "quantization_method", None) == QuantizationMethod.HQQ:
            raise ValueError("`.cuda` is not supported for HQQ-quantized models.")
        # Checks if the model has been loaded in 4-bit or 8-bit with BNB
        if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
            if getattr(self, "is_loaded_in_8bit", False):
                raise ValueError(
                    "Calling `cuda()` is not supported for `8-bit` quantized models. "
                    " Please use the model as it is, since the model has already been set to the correct devices."
                )
            elif version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"):
                raise ValueError(
                    "Calling `cuda()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. "
f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2." ) else: return super().cuda(*args, **kwargs)
    @wraps(torch.nn.Module.to)
    def to(self, *args, **kwargs):
        # For BNB/GPTQ models, we prevent users from casting the model to another dtype to restrict unwanted behaviours.
        # the correct API should be to load the model with the desired dtype directly through `from_pretrained`.
        dtype_present_in_args = "dtype" in kwargs

        if not dtype_present_in_args:
            for arg in args:
                if isinstance(arg, torch.dtype):
                    dtype_present_in_args = True
                    break
        if getattr(self, "quantization_method", None) == QuantizationMethod.HQQ:
            raise ValueError("`.to` is not supported for HQQ-quantized models.")
        # Checks if the model has been loaded in 4-bit or 8-bit with BNB
        if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
            if dtype_present_in_args:
                raise ValueError(
                    "You cannot cast a bitsandbytes model in a new `dtype`. Make sure to load the model using `from_pretrained` using the"
                    " desired `dtype` by passing the correct `torch_dtype` argument."
                )
            if getattr(self, "is_loaded_in_8bit", False):
                raise ValueError(
                    "`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the"
                    " model has already been set to the correct devices and cast to the correct `dtype`."
                )
            elif version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"):
                raise ValueError(
                    "Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. "
                    f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2."
                )
        elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ:
            if dtype_present_in_args:
                raise ValueError(
"You cannot cast a GPTQ model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired" " `dtype` by passing the correct `torch_dtype` argument." ) return super().to(*args, **kwargs)
    def half(self, *args):
        # Checks if the model is quantized
        if getattr(self, "is_quantized", False):
            raise ValueError(
                "`.half()` is not supported for quantized models. Please use the model as it is, since the"
                " model has already been cast to the correct `dtype`."
            )
        else:
            return super().half(*args)

    def float(self, *args):
        # Checks if the model is quantized
        if getattr(self, "is_quantized", False):
            raise ValueError(
                "`.float()` is not supported for quantized models. Please use the model as it is, since the"
                " model has already been cast to the correct `dtype`."
            )
        else:
            return super().float(*args)
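# Since `.half()` / `.float()` refuse to run on quantized models, the error messages above point to
# loading in the desired precision directly. A minimal sketch (the checkpoint name is a placeholder):
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("google-bert/bert-base-uncased", torch_dtype=torch.float16)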
    @classmethod
    def from_pretrained(
        cls: Type[SpecificPreTrainedModelType],
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        *model_args,
        config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        ignore_mismatched_sizes: bool = False,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        use_safetensors: Optional[bool] = None,
        weights_only: bool = True,
        **kwargs,
    ) -> SpecificPreTrainedModelType:
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To
        train the model, you should first set it back in training mode with `model.train()`.
        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        If model weights are the same precision as the base model (and it is a supported model), weights will be lazily
        loaded in using the `meta` device and brought into memory once an input is passed through that layer regardless
        of `low_cpu_mem_usage`.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                    - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g,
                      `./flax_model/` containing `flax_model.msgpack`). In this case, `from_flax` should be set to
                      `True`.
                    - `None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments `config` and `state_dict`).
            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*):
                Can be either:
                    - an instance of a class derived from [`PretrainedConfig`],
                    - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            state_dict (`Dict[str, torch.Tensor]`, *optional*):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (`bool`, *optional*, defaults to `False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            from_flax (`bool`, *optional*, defaults to `False`):
                Load the model weights from a Flax checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5
                of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            _fast_init (`bool`, *optional*, defaults to `True`):
                Whether or not to disable fast initialization.

                <Tip warning={true}>

                One should only disable *_fast_init* to ensure backwards compatibility with
                `transformers.__version__ < 4.6.0` for seeded model initialization. This argument will be removed at
                the next major version. See [pull request 11471](https://github.com/huggingface/transformers/pull/11471)
                for more information.
                </Tip>

            attn_implementation (`str`, *optional*):
                The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual
                implementation of the attention), `"sdpa"` (using
                [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)),
                or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)).
                By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual
                `"eager"` implementation.

            > Parameters for big model inference
            low_cpu_mem_usage (`bool`, *optional*):
                Tries not to use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Generally should be combined with a `device_map` (such as `"auto"`) for best results.
                This is an experimental feature and is subject to change at any moment.

                <Tip>

                If the model weights are in the same precision as the model loaded in, `low_cpu_mem_usage` (without
                `device_map`) is redundant and will not provide any benefit in regards to CPU memory usage. However,
                this should still be enabled if you are passing in a `device_map`.

                </Tip>

            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under a specific `dtype`. The different options
                are:
                1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified `dtype`, ignoring the
                   model's `config.torch_dtype` if one exists. If not specified, the model will be loaded in
                   `torch.float` (fp32).

                2. `"auto"` - A `torch_dtype` entry in the `config.json` file of the model will be attempted to be
                   used. If this entry isn't found, then next check the `dtype` of the first weight in the checkpoint
                   that's of a floating point type and use that as `dtype`. This will load the model using the `dtype`
                   it was saved in at the end of the training. It can't be used as an indicator of how the model was
                   trained, since it could have been trained in one of the half-precision dtypes but saved in fp32.

                3. A string that is a valid `torch.dtype`. E.g. "float32" loads the model in `torch.float32`,
                   "float16" loads in `torch.float16` etc.

                <Tip>
                For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or
                reach out to the authors and ask them to add this information to the model's card and to insert the
                `torch_dtype` entry in `config.json` on the hub.

                </Tip>

            device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
                same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
                like `1`) on which the model will be allocated, the device map will map the entire model to this
                device. Passing `device_map = 0` means put the whole model on GPU 0.
                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see
                [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available
                for each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
            offload_state_dict (`bool`, *optional*):
                If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU
                RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to
                `True` when there is some disk offload.
            offload_buffers (`bool`, *optional*):
                Whether or not to offload the buffers with the model parameters.
            quantization_config (`Union[QuantizationConfigMixin,Dict]`, *optional*):
                A dictionary of configuration parameters or a QuantizationConfigMixin object for quantization (e.g
                bitsandbytes, gptq). There may be other quantization-related kwargs, including `load_in_4bit` and
                `load_in_8bit`, which are parsed by QuantizationConfigParser. Supported only for bitsandbytes
                quantizations and not preferred. Consider inserting all such arguments into `quantization_config`
                instead.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            variant (`str`, *optional*):
                If specified, load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
                ignored when using `from_tf` or `from_flax`.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors`
                is not installed, it will be set to `False`.
            weights_only (`bool`, *optional*, defaults to `True`):
                Indicates whether the unpickler should be restricted to loading only tensors, primitive types,
                dictionaries and any types added via `torch.serialization.add_safe_globals()`. When set to `False`, we
                can load wrapper tensor subclass weights.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:
                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        <Tip>

        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
        use this method in a firewalled environment.

        </Tip>
        Examples:

        ```python
        >>> from transformers import BertConfig, BertModel
        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model = BertModel.from_pretrained("./test/saved_model/")
        >>> # Update configuration during loading.
        >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True)
        >>> assert model.config.output_attentions == True

        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
        >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json")
        >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config)
        >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
        >>> model = BertModel.from_pretrained("google-bert/bert-base-uncased", from_flax=True)
        ```
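        A further illustrative example (a sketch; `torch_dtype` and `device_map` are the arguments documented
        above, and `device_map="auto"` additionally requires `accelerate`):

        ```python
        >>> import torch

        >>> model = BertModel.from_pretrained(
        ...     "google-bert/bert-base-uncased", torch_dtype=torch.float16, device_map="auto"
        ... )
        ```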
        * `low_cpu_mem_usage` algorithm:

        This is an experimental function that loads the model using ~1x model size CPU memory

        Here is how it works:

        1. save which state_dict keys we have
        2. drop state_dict before the model is created, since the latter takes 1x model size CPU memory
        3. after the model has been instantiated switch to the meta device all params/buffers that
           are going to be replaced from the loaded state_dict
        4. load state_dict 2nd time
        5. replace the params/buffers from the state_dict

        Currently, it can't handle deepspeed ZeRO stage 3 and ignores loading errors
""" state_dict = kwargs.pop("state_dict", None) from_tf = kwargs.pop("from_tf", False) from_flax = kwargs.pop("from_flax", False) resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) use_auth_token = kwargs.pop("use_auth_token", None) trust_remote_code = kwargs.pop("trust_remote_code", None) _ = kwargs.pop("mirror", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) _fast_init = kwargs.pop("_fast_init", True) torch_dtype = kwargs.pop("torch_dtype", None) low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", None) device_map = kwargs.pop("device_map", None) max_memory = kwargs.pop("max_memory", None) offload_folder = kwargs.pop("offload_folder", None)
        offload_state_dict = kwargs.pop("offload_state_dict", False)
        offload_buffers = kwargs.pop("offload_buffers", False)
        load_in_8bit = kwargs.pop("load_in_8bit", False)
        load_in_4bit = kwargs.pop("load_in_4bit", False)
        quantization_config = kwargs.pop("quantization_config", None)
        subfolder = kwargs.pop("subfolder", "")
        commit_hash = kwargs.pop("_commit_hash", None)
        variant = kwargs.pop("variant", None)
        adapter_kwargs = kwargs.pop("adapter_kwargs", {})
        adapter_name = kwargs.pop("adapter_name", "default")
        use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False)
        generation_config = kwargs.pop("generation_config", None)
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
gguf_file = kwargs.pop("gguf_file", None)
# Cache path to the GGUF file
gguf_path = None
tp_plan = kwargs.pop("tp_plan", None)
if tp_plan is not None and tp_plan != "auto":
    # TODO: we can relax this check when we support taking tp_plan from a json file, for example.
    raise ValueError(f"tp_plan supports 'auto' only for now but got {tp_plan}.")

if is_fsdp_enabled():
    low_cpu_mem_usage = True

if use_auth_token is not None:
    warnings.warn(
        "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
        FutureWarning,
    )
    if token is not None:
        raise ValueError(
            "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
        )
    token = use_auth_token
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
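As a usage-level illustration of the `use_auth_token` → `token` migration handled in the chunk above (the repo id and token value below are placeholders, not real):

```python
# Hypothetical example: repo id and token value are placeholders.
from transformers import BertModel

# Deprecated form; emits a FutureWarning and is remapped to `token` by the code above:
# model = BertModel.from_pretrained("some-org/private-bert", use_auth_token="hf_xxx")

# Preferred form:
model = BertModel.from_pretrained("some-org/private-bert", token="hf_xxx")
```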
if token is not None and adapter_kwargs is not None and "token" not in adapter_kwargs:
    adapter_kwargs["token"] = token

if use_safetensors is None and not is_safetensors_available():
    use_safetensors = False

if trust_remote_code is True:
    logger.warning(
        "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
        " ignored."
    )

if gguf_file is not None and not is_accelerate_available():
    raise ValueError("accelerate is required when loading a GGUF file `pip install accelerate`.")
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
if commit_hash is None:
    if not isinstance(config, PretrainedConfig):
        # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
        resolved_config_file = cached_file(
            pretrained_model_name_or_path,
            CONFIG_NAME,
            cache_dir=cache_dir,
            force_download=force_download,
            resume_download=resume_download,
            proxies=proxies,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            subfolder=subfolder,
            _raise_exceptions_for_gated_repo=False,
            _raise_exceptions_for_missing_entries=False,
            _raise_exceptions_for_connection_errors=False,
        )
        commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
    else:
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
commit_hash = getattr(config, "_commit_hash", None)
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
if is_peft_available():
    _adapter_model_path = adapter_kwargs.pop("_adapter_model_path", None)

    if _adapter_model_path is None:
        _adapter_model_path = find_adapter_config_file(
            pretrained_model_name_or_path,
            cache_dir=cache_dir,
            force_download=force_download,
            resume_download=resume_download,
            proxies=proxies,
            local_files_only=local_files_only,
            _commit_hash=commit_hash,
            **adapter_kwargs,
        )
    if _adapter_model_path is not None and os.path.isfile(_adapter_model_path):
        with open(_adapter_model_path, "r", encoding="utf-8") as f:
            _adapter_model_path = pretrained_model_name_or_path
            pretrained_model_name_or_path = json.load(f)["base_model_name_or_path"]
else:
    _adapter_model_path = None
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
# change device_map into a map if we passed an int, a str or a torch.device
if isinstance(device_map, torch.device):
    device_map = {"": device_map}
elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
    try:
        device_map = {"": torch.device(device_map)}
    except RuntimeError:
        raise ValueError(
            "When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or "
            f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}."
        )
elif isinstance(device_map, int):
    if device_map < 0:
        raise ValueError(
            "You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' "
        )
    else:
        device_map = {"": device_map}
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
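As an illustration of the normalization above, the following calls should be equivalent ways of requesting a single-device placement (assuming `accelerate` is installed and a CUDA device is available; the checkpoint is simply the one used in the docstring example):

```python
# Illustration only; requires `accelerate` and a CUDA device.
import torch
from transformers import AutoModel

checkpoint = "google-bert/bert-base-uncased"

# These three all normalize to {"": device}, i.e. the whole model on cuda:0:
model = AutoModel.from_pretrained(checkpoint, device_map=0)
model = AutoModel.from_pretrained(checkpoint, device_map="cuda:0")
model = AutoModel.from_pretrained(checkpoint, device_map=torch.device("cuda:0"))

# "auto" is passed through unchanged and lets Accelerate infer a placement instead:
model = AutoModel.from_pretrained(checkpoint, device_map="auto")
```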
if device_map is not None:
    if low_cpu_mem_usage is None:
        low_cpu_mem_usage = True
    elif not low_cpu_mem_usage:
        raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`")

if low_cpu_mem_usage:
    if is_deepspeed_zero3_enabled():
        raise ValueError(
            "DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`."
        )
    elif not is_accelerate_available():
        raise ImportError(
            f"Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
        )
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
# handling bnb config from kwargs, remove after `load_in_{4/8}bit` deprecation.
if load_in_4bit or load_in_8bit:
    if quantization_config is not None:
        raise ValueError(
            "You can't pass `load_in_4bit`or `load_in_8bit` as a kwarg when passing "
            "`quantization_config` argument at the same time."
        )
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
    # preparing BitsAndBytesConfig from kwargs
    config_dict = {k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters}
    config_dict = {**config_dict, "load_in_4bit": load_in_4bit, "load_in_8bit": load_in_8bit}
    quantization_config, kwargs = BitsAndBytesConfig.from_dict(
        config_dict=config_dict, return_unused_kwargs=True, **kwargs
    )
    logger.warning(
        "The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. "
        "Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead."
    )

from_pt = not (from_tf | from_flax)

user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
if from_pipeline is not None:
    user_agent["using_pipeline"] = from_pipeline
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
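A sketch of the replacement that the deprecation warning above asks for: passing a `BitsAndBytesConfig` through `quantization_config` instead of the legacy flags (assumes `bitsandbytes` is installed and a CUDA GPU is available; the checkpoint name is illustrative):

```python
# Recommended form going forward, replacing `load_in_8bit=True` / `load_in_4bit=True` kwargs.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_8bit=True)  # or load_in_4bit=True
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # illustrative checkpoint
    quantization_config=quant_config,
    device_map="auto",
)
```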
if is_offline_mode() and not local_files_only:
    logger.info("Offline mode: forcing local_files_only=True")
    local_files_only = True
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
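For reference, offline mode is normally driven by the `TRANSFORMERS_OFFLINE` / `HF_HUB_OFFLINE` environment variables; the explicit `local_files_only=True` call below is the equivalent per-call form (checkpoint name illustrative, and the weights must already be in the local cache):

```python
# Equivalent ways to stay offline; the checkpoint must already be present in the local cache.
# Option 1: set the environment variable before Python starts, which makes
# is_offline_mode() return True and forces local_files_only:
#   export TRANSFORMERS_OFFLINE=1   # or HF_HUB_OFFLINE=1
# Option 2: request it explicitly on a single call:
from transformers import AutoModel

model = AutoModel.from_pretrained("google-bert/bert-base-uncased", local_files_only=True)
```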
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
    config_path = config if config is not None else pretrained_model_name_or_path
    config, model_kwargs = cls.config_class.from_pretrained(
        config_path,
        cache_dir=cache_dir,
        return_unused_kwargs=True,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        local_files_only=local_files_only,
        token=token,
        revision=revision,
        subfolder=subfolder,
        _from_auto=from_auto_class,
        _from_pipeline=from_pipeline,
        **kwargs,
    )
else:
    # In case one passes a config to `from_pretrained` + "attn_implementation"
    # override the `_attn_implementation` attribute to `attn_implementation` of the kwargs
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
# Please see: https://github.com/huggingface/transformers/issues/28038
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
# Overwrite `config._attn_implementation` by the one from the kwargs --> in auto-factory
# we pop attn_implementation from the kwargs but this handles the case where users
# pass the config manually to `from_pretrained`.
config = copy.deepcopy(config)

kwarg_attn_imp = kwargs.pop("attn_implementation", None)
if kwarg_attn_imp is not None:
    config._attn_implementation = kwarg_attn_imp

model_kwargs = kwargs

pre_quantized = getattr(config, "quantization_config", None) is not None
if pre_quantized or quantization_config is not None:
    if pre_quantized:
        config.quantization_config = AutoHfQuantizer.merge_quantization_configs(
            config.quantization_config, quantization_config
        )
    else:
        config.quantization_config = quantization_config
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
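A small sketch of the kwarg override handled here: when both an explicit `config` and `attn_implementation` are passed, the kwarg should win (checkpoint name is illustrative; the set of valid values depends on the model and installed backends):

```python
# Illustration; valid values such as "eager", "sdpa", "flash_attention_2" depend on the setup.
from transformers import AutoConfig, AutoModel

checkpoint = "google-bert/bert-base-uncased"
config = AutoConfig.from_pretrained(checkpoint)

model = AutoModel.from_pretrained(checkpoint, config=config, attn_implementation="sdpa")
print(model.config._attn_implementation)  # expected: "sdpa"
```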
    hf_quantizer = AutoHfQuantizer.from_config(
        config.quantization_config,
        pre_quantized=pre_quantized,
    )
else:
    hf_quantizer = None

if hf_quantizer is not None:
    hf_quantizer.validate_environment(
        torch_dtype=torch_dtype,
        from_tf=from_tf,
        from_flax=from_flax,
        device_map=device_map,
        weights_only=weights_only,
    )
    torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype)
    device_map = hf_quantizer.update_device_map(device_map)

    # In order to ensure popular quantization methods are supported. Can be disabled with `disable_telemetry`
    user_agent["quant"] = hf_quantizer.quantization_config.quant_method.value
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
    # Force-set to `True` for more mem efficiency
    if low_cpu_mem_usage is None:
        low_cpu_mem_usage = True
        logger.warning("`low_cpu_mem_usage` was None, now default to True since model is quantized.")

is_quantized = hf_quantizer is not None

# This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
# index of the files.
is_sharded = False
sharded_metadata = None
# Load model
loading_info = None

# Keep in fp32 modules
keep_in_fp32_modules = None
use_keep_in_fp32_modules = False

if gguf_file is not None and hf_quantizer is not None:
    raise ValueError(
        "You cannot combine Quantization and loading a model from a GGUF file, try again by making sure you did not passed a `quantization_config` or that you did not load a quantized model from the Hub."
    )
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
if pretrained_model_name_or_path is not None and gguf_file is None:
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    is_local = os.path.isdir(pretrained_model_name_or_path)
    if is_local:
        if from_tf and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
        ):
            # Load from a TF 1.0 checkpoint in priority if from_tf
            archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
        elif from_tf and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)
        ):
            # Load from a TF 2.0 checkpoint in priority if from_tf
            archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)
        elif from_flax and os.path.isfile(
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
    os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
):
    # Load from a Flax checkpoint in priority if from_flax
    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
elif use_safetensors is not False and os.path.isfile(
    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))
):
    # Load from a safetensors checkpoint
    archive_file = os.path.join(
        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)
    )
elif use_safetensors is not False and os.path.isfile(
    os.path.join(
        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
    )
):
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
    # Load from a sharded safetensors checkpoint
    archive_file = os.path.join(
        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
    )
    is_sharded = True
elif not use_safetensors and os.path.isfile(
    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))
):
    # Load from a PyTorch checkpoint
    archive_file = os.path.join(
        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)
    )
elif not use_safetensors and os.path.isfile(
    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))
):
    # Load from a sharded PyTorch checkpoint
    archive_file = os.path.join(
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py