Dataset preview columns: text (string, lengths 1–1.02k), class_index (int64, values 0–10.8k), source (string, lengths 85–188). Every row shown here has class_index 229 or 230 and source /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py; the text column holds consecutive chunks of that file.
def reset_memory_hooks_state(self): """ Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]). """ for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ return get_parameter_device(self) @property def dtype(self) -> torch.dtype: """ `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ return get_parameter_dtype(self) def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: """ Invert an attention mask (e.g., switches 0. and 1.).
Args: encoder_attention_mask (`torch.Tensor`): An attention mask.
Returns: `torch.Tensor`: The inverted attention mask. """ if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min
return encoder_extended_attention_mask @staticmethod def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): if device is not None: warnings.warn( "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning ) else: device = attention_mask.device batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # in case past_key_values are used we need to add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] return extended_attention_mask def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. Returns: `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`. """ if dtype is None: dtype = self.dtype
if not (attention_mask.dim() == 2 and self.config.is_decoder): # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder` if device is not None: warnings.warn( "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder: extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( input_shape, attention_mask, device ) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" )
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min return extended_attention_mask def get_head_mask( self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False ) -> Tensor: """ Prepare the head mask if needed.
Args: head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (`int`): The number of hidden layers in the model. is_attention_chunked (`bool`, *optional*, defaults to `False`): Whether or not the attention scores are computed by chunks. Returns: `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with `[None]` for each layer. """ if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask
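A minimal usage sketch of the two mask helpers above, assuming a standard encoder checkpoint; the model name and tensor shapes are illustrative, not taken from this file:

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
model = AutoModel.from_pretrained("google-bert/bert-base-cased")

inputs = tokenizer(["hello world", "hi"], padding=True, return_tensors="pt")
attention_mask = inputs["attention_mask"]  # [batch_size, seq_length], 1 = attend, 0 = ignore

# Broadcastable additive mask: [batch_size, 1, 1, seq_length] for an encoder,
# 0.0 where tokens are kept and the dtype's minimum where they are masked.
extended_mask = model.get_extended_attention_mask(attention_mask, attention_mask.shape)

# No head pruning requested: returns a list of `None`, one entry per layer.
head_mask = model.get_head_mask(None, model.config.num_hidden_layers)
```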
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility return head_mask def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: """ Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters exclude_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of non-embeddings parameters Returns: `int`: The number of parameters. """ if exclude_embeddings: embedding_param_names = [ f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) ] total_parameters = [ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names ] else: total_parameters = list(self.parameters()) total_numel = [] is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False)
if is_loaded_in_4bit: if is_bitsandbytes_available(): import bitsandbytes as bnb else: raise ValueError( "bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong" " make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. " )
for param in total_parameters: if param.requires_grad or not only_trainable: # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are # used for the 4bit quantization (uint8 tensors are stored) if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): if hasattr(param, "element_size"): num_bytes = param.element_size() elif hasattr(param, "quant_storage"): num_bytes = param.quant_storage.itemsize else: num_bytes = 1 total_numel.append(param.numel() * 2 * num_bytes) else: total_numel.append(param.numel()) return sum(total_numel)
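The parameter-counting helper above can be called directly on any loaded model; a short example (the checkpoint name is illustrative):

```python
from transformers import AutoModel

model = AutoModel.from_pretrained("google-bert/bert-base-cased")

print(model.num_parameters())                         # all parameters
print(model.num_parameters(only_trainable=True))      # trainable parameters only
print(model.num_parameters(exclude_embeddings=True))  # skip nn.Embedding weight matrices
```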
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: """ Helper function to estimate the total number of tokens from the model inputs. Args: inputs (`dict`): The model inputs. Returns: `int`: The total number of tokens. """ if not hasattr(self, "warnings_issued"): self.warnings_issued = {} if self.main_input_name in input_dict: return input_dict[self.main_input_name].numel() elif "estimate_tokens" not in self.warnings_issued: logger.warning( "Could not estimate the number of tokens of the input, floating-point operations will not be computed" ) self.warnings_issued["estimate_tokens"] = True return 0
def floating_point_ops( self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True ) -> int: """ Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a batch with this transformer model. Default approximation neglects the quadratic dependency on the number of tokens (valid if `12 * d_model << sequence_length`) as laid out in [this paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. Args: input_dict (`Dict[str, Union[torch.Tensor, Any]]`): The model inputs, used to estimate the total number of tokens in the batch.
exclude_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to count embedding and softmax operations. Returns: `int`: The number of floating-point operations. """ return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
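The `6 * tokens * parameters` estimate returned above can be checked by hand; the batch shape and parameter count below are illustrative values, not taken from this file:

```python
# Roughly 2 * params FLOPs per token for the forward pass and about twice that for the
# backward pass, giving the 6 * tokens * params approximation used by floating_point_ops.
batch_size, seq_length = 8, 512
num_tokens = batch_size * seq_length      # 4096 tokens in the batch
num_params = 110_000_000                  # e.g. a BERT-base-sized model

flops = 6 * num_tokens * num_params       # ~2.7e12 floating-point operations
```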
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): r""" Base class for all models. [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint. - **config** ([`PreTrainedConfig`]) -- An instance of the configuration associated to the model. - **path** (`str`) -- A path to the TensorFlow checkpoint. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_class = None base_model_prefix = "" main_input_name = "input_ids" model_tags = None
_auto_class = None _no_split_modules = None _skip_keys_device_placement = None _keep_in_fp32_modules = None # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings. _keys_to_ignore_on_load_missing = None # a list of `re` patterns of `state_dict` keys that should be removed from the list of # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary # warnings. _keys_to_ignore_on_load_unexpected = None # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't # trained, but which are either deterministic or tied variables) _keys_to_ignore_on_save = None # a list of `state_dict` keys that are potentially tied to another key in the state_dict. _tied_weights_keys = None
is_parallelizable = False supports_gradient_checkpointing = False _is_stateful = False # Flash Attention 2 support _supports_flash_attn_2 = False # SDPA support _supports_sdpa = False # Flex Attention support _supports_flex_attn = False # Has support for a `Cache` instance as `past_key_values`? Does it support a `StaticCache`? _supports_cache_class = False _supports_static_cache = False # Has support for a `QuantoQuantizedCache` instance as `past_key_values` _supports_quantized_cache = False # A tensor parallel plan to be applied to the model when TP is enabled. For # top-level models, this attribute is currently defined in respective model # code. For base models, this attribute comes from # `config.base_model_tp_plan` during `post_init`. _tp_plan = None
@property def dummy_inputs(self) -> Dict[str, torch.Tensor]: """ `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. """ return {"input_ids": torch.tensor(DUMMY_INPUTS)} @property def framework(self) -> str: """ :str: Identifies that this is a PyTorch model. """ return "pt"
def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not getattr(config, "_attn_implementation_autoset", False): # config usually has a `torch_dtype` but we need the next line for the `no_super_init` tests dtype = config.torch_dtype if hasattr(config, "torch_dtype") else torch.get_default_dtype() config = self._autoset_attn_implementation(config, torch_dtype=dtype, check_device_map=False) self.config = config
# for initialization of the loss loss_type = self.__class__.__name__ if loss_type not in LOSS_MAPPING: loss_groups = f"({'|'.join(LOSS_MAPPING)})" loss_type = re.findall(loss_groups, self.__class__.__name__) if len(loss_type) > 0: loss_type = loss_type[0] else: loss_type = None self.loss_type = loss_type self.name_or_path = config.name_or_path self.warnings_issued = {} self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None # Overwrite the class attribute to make it an instance attribute, so models like # `InstructBlipForConditionalGeneration` can dynamically update it without modifying the class attribute # when a different component (e.g. language_model) is used. self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules)
def post_init(self): """ A method executed at the end of each Transformer model initialization, to execute code that needs the model's modules properly initialized (such as weight initialization). """ self.init_weights() self._backward_compatibility_gradient_checkpointing() # If current model is a base model, attach `base_model_tp_plan` from config if self.base_model is self: self._tp_plan = self.config.base_model_tp_plan def dequantize(self): """ Potentially dequantize the model in case it has been quantized by a quantization method that support dequantization. """ hf_quantizer = getattr(self, "hf_quantizer", None) if hf_quantizer is None: raise ValueError("You need to first quantize your model in order to dequantize it") return hf_quantizer.dequantize(self)
def _backward_compatibility_gradient_checkpointing(self): if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False): self.gradient_checkpointing_enable() # Remove the attribute now that it has been consumed, so it's not saved in the config. delattr(self.config, "gradient_checkpointing") def add_model_tags(self, tags: Union[List[str], str]) -> None: r""" Add custom tags into the model that gets pushed to the Hugging Face Hub. Will not overwrite existing tags in the model. Args: tags (`Union[List[str], str]`): The desired tags to inject in the model Examples: ```python from transformers import AutoModel model = AutoModel.from_pretrained("google-bert/bert-base-cased") model.add_model_tags(["custom", "custom-bert"])
# Push the model to your namespace with the name "my-custom-bert". model.push_to_hub("my-custom-bert") ``` """ if isinstance(tags, str): tags = [tags] if self.model_tags is None: self.model_tags = [] for tag in tags: if tag not in self.model_tags: self.model_tags.append(tag) @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here.
Args: torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. """ # when we init a model from within another model (e.g. VLMs) and dispatch on FA2 # a warning is raised that dtype should be fp16. Since we never pass dtype from within # modeling code, we can try to infer it here same way as done in `from_pretrained` torch_dtype = kwargs.pop("torch_dtype", config.torch_dtype) if isinstance(torch_dtype, str): torch_dtype = getattr(torch, torch_dtype) use_flash_attention_2 = kwargs.pop("use_flash_attention_2", False) # override default dtype if needed dtype_orig = None if torch_dtype is not None: dtype_orig = cls._set_default_torch_dtype(torch_dtype) config = copy.deepcopy(config) # We do not want to modify the config inplace in _from_config.
if config._attn_implementation_internal is not None: # In this case, the config has been created with the attn_implementation set by the user, which we # should respect. attn_implementation = config._attn_implementation_internal else: attn_implementation = None config._attn_implementation = kwargs.pop("attn_implementation", attn_implementation) if not getattr(config, "_attn_implementation_autoset", False): config = cls._autoset_attn_implementation( config, use_flash_attention_2=use_flash_attention_2, check_device_map=False, torch_dtype=torch_dtype, ) if is_deepspeed_zero3_enabled() and not _is_quantized and not _is_ds_init_called: import deepspeed
logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") # this immediately partitions the model across all gpus, to avoid the overhead in time # and memory copying it on CPU or each GPU first init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()] with ContextManagers(init_contexts): model = cls(config, **kwargs) else: model = cls(config, **kwargs) # restore default dtype if it was modified if dtype_orig is not None: torch.set_default_dtype(dtype_orig) return model
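The usual way to reach `_from_config` is through the public `from_config` path, which builds a randomly initialized model from a configuration; a hedged sketch (the checkpoint name is illustrative):

```python
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("google-bert/bert-base-cased")
model = AutoModel.from_config(config)  # fresh weights, no pretrained checkpoint is loaded
```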
@classmethod def _autoset_attn_implementation( cls, config, use_flash_attention_2: bool = False, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, check_device_map: bool = True, ): """ Automatically checks and dispatches to a default attention implementation. In order of priority: 1. An implementation specified in `config._attn_implementation` (due for example to the argument attn_implementation="sdpa" in from_pretrained). 2. DEPRECATED: if use_flash_attention_2 is set to `True` and `flash_attn` is available, flash attention. (`LlamaFlashAttention` for example) 3. SDPA implementation, if available and supported by the model type. (`LlamaSdpaAttention` for example) 4. The default model's implementation otherwise (`LlamaAttention` for example) . """
# Here we use config._attn_implementation_internal to check whether the attention implementation was explicitly set by the user. # The property `PretrainedConfig._attn_implementation` is never `None`, for backward compatibility (always fall back on "eager"). # The `hasattr` here is used because some Transformers tests for some reason do not call PretrainedConfig __init__ (e.g. test_no_super_init_config_and_model) requested_attn_implementation = None if hasattr(config, "_attn_implementation_internal") and config._attn_implementation_internal is not None: if config._attn_implementation != "flash_attention_2" and use_flash_attention_2: raise ValueError( f'Both attn_implementation="{config._attn_implementation}" and `use_flash_attention_2=True` were used when loading the model, which are not compatible.' ' We recommend to just use `attn_implementation="flash_attention_2"` when loading the model.'
)
if not isinstance(config._attn_implementation, dict) and config._attn_implementation not in [ "eager" ] + list(ALL_ATTENTION_FUNCTIONS.keys()): message = f'Specified `attn_implementation="{config._attn_implementation}"` is not supported. The only possible arguments are `attn_implementation="eager"` (manual attention implementation)' if cls._supports_flash_attn_2: message += ', `"attn_implementation=flash_attention_2"` (implementation using flash attention 2)' if cls._supports_sdpa: message += ', `"attn_implementation=sdpa"` (implementation using torch.nn.functional.scaled_dot_product_attention)' if cls._supports_flex_attn: message += ( ', `"attn_implementation=flex_attention"` (implementation using torch\'s flex_attention)' ) raise ValueError(message + ".")
# If a config is passed with a preset attn_implementation, we skip the automatic dispatch and use the user-provided config, with hard checks that the requested attention implementation is available. requested_attn_implementation = config._attn_implementation_internal
# Composite models consisting of several PretrainedModels have to specify attention impl as a dict # where keys are sub-config names. But most people will specify one `str` which means that should dispatch it # for all sub-models. # Below we check if a config is composite and manually prepare a dict of attn impl if not already passed as a dict. # Later each sub-module will dispatch with its own attn impl, by calling `XXXModel._from_config(config.text_config)` # If any of sub-modules doesn't support requested attn, an error will be raised. See https://github.com/huggingface/transformers/pull/32238 for key in config.sub_configs.keys(): sub_config = getattr(config, key) curr_attn_implementation = ( requested_attn_implementation if not isinstance(requested_attn_implementation, dict) else requested_attn_implementation.get(key, None) )
sub_config._attn_implementation_internal = curr_attn_implementation
if use_flash_attention_2: logger.warning_once( 'The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.' ) config._attn_implementation = "flash_attention_2"
if config._attn_implementation == "flash_attention_2": cls._check_and_enable_flash_attn_2( config, torch_dtype=torch_dtype, device_map=device_map, hard_check_only=False, check_device_map=check_device_map, ) elif requested_attn_implementation == "flex_attention": config = cls._check_and_enable_flex_attn(config, hard_check_only=True) elif requested_attn_implementation in [None, "sdpa"] and not is_torch_xla_available(): # use_flash_attention_2 takes priority over SDPA, hence SDPA treated in this elif. config = cls._check_and_enable_sdpa( config, hard_check_only=False if requested_attn_implementation is None else True, )
if ( torch.version.hip is not None and config._attn_implementation == "sdpa" and torch.cuda.device_count() > 1 ): logger.warning_once( "Using the `SDPA` attention implementation on multi-gpu setup with ROCM may lead to performance issues due to the FA backend. Disabling it to use alternative backends." ) torch.backends.cuda.enable_flash_sdp(False) elif requested_attn_implementation in list(ALL_ATTENTION_FUNCTIONS.keys()): config._attn_implementation = requested_attn_implementation elif isinstance(requested_attn_implementation, dict): config._attn_implementation = None else: config._attn_implementation = "eager" config._attn_implementation_autoset = True return config
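From the user side, this dispatch is driven by the `attn_implementation` argument at load time; a sketch, assuming the backend is supported by the model and the environment (the checkpoint name is illustrative):

```python
from transformers import AutoModel

# Explicitly request SDPA; leaving `attn_implementation` unset lets the dispatch above
# pick SDPA when it is available and supported, and fall back to "eager" otherwise.
model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="sdpa")
```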
@classmethod def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: """ Change the default dtype and return the previous one. This is needed when wanting to instantiate the model under specific dtype. Args: dtype (`torch.dtype`): a floating dtype to set to. Returns: `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was modified. If it wasn't, returns `None`. Note `set_default_dtype` currently only works with floating-point types and asserts if for example, `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception. """ if not dtype.is_floating_point: raise ValueError( f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" )
logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") dtype_orig = torch.get_default_dtype() torch.set_default_dtype(dtype) return dtype_orig @property def base_model(self) -> nn.Module: """ `torch.nn.Module`: The main body of the model. """ return getattr(self, self.base_model_prefix, self) @classmethod def can_generate(cls) -> bool: """ Returns whether this model can generate sequences with `.generate()`.
Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ # Directly inherits `GenerationMixin` -> can generate if "GenerationMixin" in str(cls.__bases__): return True # Model class overwrites `generate` (e.g. time series models) -> can generate if str(cls.__name__) in str(cls.generate): return True # The class inherits from a class that can generate (recursive check) -> can generate for base in cls.__bases__: if not hasattr(base, "can_generate"): continue if "PreTrainedModel" not in str(base) and base.can_generate(): return True # BC: Detects whether `prepare_inputs_for_generation` has been overwritten in the model. Prior to v4.45, this # was how we detected whether a model could generate. if "GenerationMixin" not in str(cls.prepare_inputs_for_generation): logger.warning_once(
f"{cls.__name__} has generative capabilities, as `prepare_inputs_for_generation` is explicitly " "overwritten. However, it doesn't directly inherit from `GenerationMixin`. From 👉v4.50👈 onwards, " "`PreTrainedModel` will NOT inherit from `GenerationMixin`, and this model will lose the ability " "to call `generate` and other related functions." "\n - If you're using `trust_remote_code=True`, you can get rid of this warning by loading the " "model with an auto class. See https://huggingface.co/docs/transformers/en/model_doc/auto#auto-classes" "\n - If you are the owner of the model architecture code, please modify your model class such that " "it inherits from `GenerationMixin` (after `PreTrainedModel`, otherwise you'll get an exception)." "\n - If you are not the owner of the model architecture class, please contact the model code owner "
"to update it." ) return True # Otherwise, can't generate return False
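Per the warning above, a custom architecture keeps `generate()` by inheriting `GenerationMixin` explicitly; the class name below is hypothetical and the body is elided:

```python
from transformers import GenerationMixin, PreTrainedModel


# Hypothetical model class: inheriting GenerationMixin after PreTrainedModel makes
# `can_generate()` return True without relying on the deprecated detection path.
class MyCausalLM(PreTrainedModel, GenerationMixin):
    ...
```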
@classmethod def _check_and_enable_flash_attn_2( cls, config, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, check_device_map: bool = True, hard_check_only: bool = False, ) -> PretrainedConfig: """ Checks the availability of Flash Attention 2 and compatibility with the current model.
If all checks pass and `hard_check_only` is False, the method will set the config attribute `attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module. """ if not cls._supports_flash_attn_2: raise ValueError( f"{cls.__name__} does not support Flash Attention 2.0 yet. Please request to add support where" f" the model is hosted, on its model hub page: https://huggingface.co/{config._name_or_path}/discussions/new" " or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new" ) if not is_flash_attn_2_available(): preface = "FlashAttention2 has been toggled on, but it cannot be used due to the following error:" install_message = "Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2."
if importlib.util.find_spec("flash_attn") is None: raise ImportError(f"{preface} the package flash_attn seems to be not installed. {install_message}")
flash_attention_version = version.parse(importlib.metadata.version("flash_attn")) if torch.version.cuda: if flash_attention_version < version.parse("2.1.0"): raise ImportError( f"{preface} you need flash_attn package version to be greater or equal than 2.1.0. Detected version {flash_attention_version}. {install_message}" ) elif not torch.cuda.is_available(): raise ValueError( f"{preface} Flash Attention 2 is not available on CPU. Please make sure torch can access a CUDA device." ) else: raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}") elif torch.version.hip: if flash_attention_version < version.parse("2.0.4"): raise ImportError(
f"{preface} you need flash_attn package version to be greater or equal than 2.0.4. Make sure to have that version installed - detected version {flash_attention_version}. {install_message}" ) else: raise ImportError(f"{preface} Flash Attention 2 is not available. {install_message}")
_is_bettertransformer = getattr(cls, "use_bettertransformer", False) if _is_bettertransformer: raise ValueError( "Flash Attention 2 and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()" )
if torch_dtype is None: logger.warning_once( "You are attempting to use Flash Attention 2.0 without specifying a torch dtype. This might lead to unexpected behaviour" ) elif torch_dtype is not None and torch_dtype not in [torch.float16, torch.bfloat16]: logger.warning_once( "Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but" f" the current dtype in {cls.__name__} is {torch_dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` context manager," ' or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`' )
# The check `torch.empty(0).device.type != "cuda"` is needed as the model may be initialized after `torch.set_default_device` has been called, # or the model may be initialized under the context manager `with torch.device("cuda"):`. if check_device_map and device_map is None and torch.empty(0).device.type != "cuda": if torch.cuda.is_available(): logger.warning_once( "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU" " after initializing it on CPU with `model.to('cuda')`." ) else: raise ValueError( "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU and with no GPU available. " "This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map "
"or initialising the model on CPU and then moving it to GPU." ) elif ( check_device_map and device_map is not None and isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()) ): raise ValueError( "You are attempting to use Flash Attention 2.0 with a model dispatched on CPU or disk. This is not supported. Please make sure to " "initialise the model on a GPU by passing a device_map that contains only GPU devices as keys." ) if not hard_check_only: config._attn_implementation = "flash_attention_2" return config
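Putting the checks above together, the error-free path is a half-precision load onto a CUDA device; this mirrors the example already given in the dtype warning above:

```python
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "openai/whisper-tiny",
    attn_implementation="flash_attention_2",
    torch_dtype=torch.float16,
).to("cuda")
```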
@classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig: """ Checks the availability of SDPA for a given model.
If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "sdpa" so that the model can initialize the correct attention module. """ if hard_check_only: if not cls._supports_sdpa: raise ValueError( f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet." " Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe" ' this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`' ) if not is_torch_sdpa_available(): raise ImportError(
"PyTorch SDPA requirements in Transformers are not met. Please install torch>=2.1.1." )
if not is_torch_sdpa_available() or not cls._supports_sdpa: return config _is_bettertransformer = getattr(cls, "use_bettertransformer", False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = "sdpa" return config @classmethod def _check_and_enable_flex_attn(cls, config, hard_check_only: bool = False) -> PretrainedConfig: """ Checks the availability of Flex Attention for a given model.
If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "flex_attention" so that the model can initialize the correct attention module. """ if hard_check_only: if not cls._supports_flex_attn: raise ValueError( f"{cls.__name__} does not support an attention implementation through torch's flex_attention." " Please request the support for this architecture: https://github.com/huggingface/transformers/issues/34809." " If you believe this error is a bug, please open an issue in Transformers GitHub repository" ' and load your model with the argument `attn_implementation="eager"` meanwhile.' ' Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`' ) if not is_torch_flex_attn_available(): raise ImportError(
"PyTorch Flex Attention requirements in Transformers are not met. Please install torch>=2.5.0." )
if not is_torch_flex_attn_available() or not cls._supports_flex_attn: return config if not hard_check_only: config._attn_implementation = "flex_attention" return config def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) def disable_input_require_grads(self): """ Removes the `_require_grads_hook`. """ self._require_grads_hook.remove() def get_input_embeddings(self) -> nn.Module: """ Returns the model's input embeddings.
Returns: `nn.Module`: A torch module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value: nn.Module): """ Set model's input embeddings. Args: value (`nn.Module`): A module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self) -> nn.Module: """ Returns the model's output embeddings. Returns: `nn.Module`: A torch module mapping hidden states to vocabulary. """ return None # Overwrite for models with output embeddings
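A quick way to see what these accessors return on a concrete model (the checkpoint and head class are illustrative):

```python
from transformers import AutoModelForMaskedLM

model = AutoModelForMaskedLM.from_pretrained("google-bert/bert-base-cased")

print(model.get_input_embeddings())   # an nn.Embedding mapping token ids to hidden states
print(model.get_output_embeddings())  # the LM head projecting hidden states back to the vocabulary
```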
def _init_weights(self, module): """ Initialize the weights. This method should be overridden by derived class and is the only initialization method that will be called when loading a checkpoint using `from_pretrained`. Any attempt to initialize outside of this function will be useless as the torch.nn.init function are all replaced with skip. """ pass def _initialize_weights(self, module): """ Initialize the weights if they are not already initialized. """ if getattr(module, "_is_hf_initialized", False): return self._init_weights(module) module._is_hf_initialized = True def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings.
If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ if getattr(self.config.get_text_config(decoder=True), "tie_word_embeddings", True): output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False): if hasattr(self, self.base_model_prefix): self = getattr(self, self.base_model_prefix) tied_weights = self._tie_encoder_decoder_weights( self.encoder, self.decoder, self.base_model_prefix, "encoder" ) # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class # attribute, not an instance member, therefore modifying it will modify the entire class, # leading to issues on subsequent calls by different tests or subsequent calls. self._dynamic_tied_weights_keys = tied_weights for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights()
@staticmethod def _tie_encoder_decoder_weights( encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, base_encoder_name: str ): uninitialized_encoder_weights: List[str] = [] tied_weights: List[str] = [] if decoder.__class__ != encoder.__class__: logger.info( f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder" " weights are correctly initialized." )
def tie_encoder_to_decoder_recursively( decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, base_encoder_name: str, uninitialized_encoder_weights: List[str], depth=0, total_decoder_name="", total_encoder_name="", ): assert isinstance(decoder_pointer, nn.Module) and isinstance( encoder_pointer, nn.Module ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module" if hasattr(decoder_pointer, "weight"): assert hasattr(encoder_pointer, "weight") encoder_pointer.weight = decoder_pointer.weight tied_weights.append(f"{base_encoder_name}{total_encoder_name}.weight") if hasattr(decoder_pointer, "bias"): assert hasattr(encoder_pointer, "bias") tied_weights.append(f"{base_encoder_name}{total_encoder_name}.bias")
encoder_pointer.bias = decoder_pointer.bias return
encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert ( len(encoder_modules) > 0 ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = {module_name + "/" + sub_name for sub_name in encoder_modules.keys()} encoder_layer_pos = 0 for name, module in decoder_modules.items(): if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len( encoder_modules ) != len(decoder_modules): # this can happen if the name corresponds to the position in a list module list of layers # in this case the decoder has added a cross-attention that the encoder does not have # thus skip this step and subtract one layer pos from encoder encoder_layer_pos -= 1 continue
elif name not in encoder_modules: continue elif depth > 500: raise ValueError( "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is" " a circular dependency between two or more `nn.Modules` of your model." ) else: decoder_name = encoder_name = name tie_encoder_to_decoder_recursively( decoder_modules[decoder_name], encoder_modules[encoder_name], module_name + "/" + name, base_encoder_name, uninitialized_encoder_weights, depth=depth + 1, total_encoder_name=f"{total_encoder_name}.{encoder_name}", total_decoder_name=f"{total_decoder_name}.{decoder_name}",
) all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights) # tie weights recursively tie_encoder_to_decoder_recursively( decoder, encoder, base_model_prefix, base_encoder_name, uninitialized_encoder_weights ) if len(uninitialized_encoder_weights) > 0: logger.warning( f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" ) return tied_weights def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """Tie or clone module weights depending of whether we are using TorchScript or not""" if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None: output_embeddings.bias.data = nn.functional.pad( output_embeddings.bias.data, ( 0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0], ), "constant", 0, ) if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings def _get_no_split_modules(self, device_map: str): """ Get the modules of the model that should not be split when using device_map. We iterate through the modules to get the underlying `_no_split_modules`. Args: device_map (`str`): The device map value. Options are ["auto", "balanced", "balanced_low_0", "sequential"]
Returns: `List[str]`: List of modules that should not be split """ _no_split_modules = set() modules_to_check = [self] while len(modules_to_check) > 0: module = modules_to_check.pop(-1) # if the module does not appear in _no_split_modules, we also check the children if module.__class__.__name__ not in _no_split_modules: if isinstance(module, PreTrainedModel): if module._no_split_modules is None: raise ValueError( f"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model " "class needs to implement the `_no_split_modules` attribute." ) else: _no_split_modules = _no_split_modules | set(module._no_split_modules) modules_to_check += list(module.children())
return list(_no_split_modules)
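`_get_no_split_modules` is consumed by the `device_map` machinery rather than called directly; a hedged user-facing sketch (requires `accelerate`, and the checkpoint name is illustrative):

```python
from transformers import AutoModelForCausalLM

# "auto" sharding keeps each module listed in `_no_split_modules` on a single device.
model = AutoModelForCausalLM.from_pretrained("gpt2", device_map="auto")
```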
def resize_token_embeddings( self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True, ) -> nn.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments: new_num_tokens (`int`, *optional*): The new number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value.If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`.
Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds
# Since we are basically reusing the same old embeddings with new weight values, gathering is required is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and not is_quantized: import deepspeed with deepspeed.zero.GatheredParameters(model_embeds.weight, modifier_rank=None): vocab_size = model_embeds.weight.shape[0] else: vocab_size = model_embeds.weight.shape[0] # Update base model and current model config. self.config.get_text_config().vocab_size = vocab_size self.vocab_size = vocab_size # Tie weights again if needed self.tie_weights() return model_embeds
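Typical usage of `resize_token_embeddings` after extending a tokenizer; the tokenizer and checkpoint names are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

tokenizer.add_tokens(["<my_new_token>"])
model.resize_token_embeddings(len(tokenizer))  # mean_resizing=True by default
# Optionally round the vocabulary up for Tensor Core friendliness:
# model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
```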
def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings( old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing ) if hasattr(old_embeddings, "_hf_hook"): hook = old_embeddings._hf_hook add_hook_to_module(new_embeddings, hook) old_embeddings_requires_grad = old_embeddings.weight.requires_grad new_embeddings.requires_grad_(old_embeddings_requires_grad) self.set_input_embeddings(new_embeddings) is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None # Update new_num_tokens with the actual size of new_embeddings if pad_to_multiple_of is not None: if is_deepspeed_zero3_enabled() and not is_quantized: import deepspeed
with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): new_num_tokens = new_embeddings.weight.shape[0] else: new_num_tokens = new_embeddings.weight.shape[0]
# if word embeddings are not tied, make sure that lm head is resized as well if ( self.get_output_embeddings() is not None and not self.config.get_text_config(decoder=True).tie_word_embeddings ): old_lm_head = self.get_output_embeddings() if isinstance(old_lm_head, torch.nn.Embedding): new_lm_head = self._get_resized_embeddings(old_lm_head, new_num_tokens, mean_resizing=mean_resizing) else: new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens, mean_resizing=mean_resizing) if hasattr(old_lm_head, "_hf_hook"): hook = old_lm_head._hf_hook add_hook_to_module(new_lm_head, hook) old_lm_head_requires_grad = old_lm_head.weight.requires_grad new_lm_head.requires_grad_(old_lm_head_requires_grad) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings()
def _get_resized_embeddings( self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True, ) -> nn.Embedding: """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`torch.nn.Embedding`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to `None` will just pad the embedding to a multiple of `pad_to_multiple_of`.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`.
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal
                language models, where the generated tokens' probabilities will not be affected by the added
                embeddings: initializing the new embeddings with the old embeddings' mean reduces the KL divergence
                between the next-token probabilities before and after adding the new embeddings. Refer to this article
                for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

        Return:
            `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            `new_num_tokens` is `None`
        """
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
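To make the `mean_resizing` behaviour documented above concrete, here is a simplified, hypothetical sketch of mean-and-covariance initialization for the added rows. The real logic lives in `_init_added_embeddings_weights_with_mean` (called further below); this sketch omits the DeepSpeed handling and the library's additional numerical safeguards:

import torch

def init_added_rows_with_mean(old_weight: torch.Tensor, added_num_tokens: int) -> torch.Tensor:
    # old_weight has shape (old_num_tokens, embedding_dim).
    old = old_weight.float()
    mean = old.mean(dim=0)
    centered = old - mean
    # Empirical covariance of the existing embeddings, with a small diagonal
    # jitter so the matrix stays positive-definite for sampling.
    cov = centered.T @ centered / old.shape[0]
    cov = cov + 1e-5 * torch.eye(cov.shape[0])
    dist = torch.distributions.MultivariateNormal(mean, covariance_matrix=cov)
    return dist.sample((added_num_tokens,)).to(old_weight.dtype)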
if pad_to_multiple_of is not None:
            if not isinstance(pad_to_multiple_of, int):
                raise ValueError(
                    f"Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not an integer. Please make sure to pass an integer"
                )
            if new_num_tokens is None:
                new_num_tokens = old_embeddings.weight.shape[0]
            new_num_tokens = ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of
        else:
            logger.info(
                "You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding"
                f" dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available."
                " For more details about this, or help on choosing the correct value for resizing, refer to this guide:"
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
" https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc" )
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
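As a quick illustration of the rounding performed just above (the standalone function name is ours; only the formula is taken from the method):

def pad_vocab_to_multiple(new_num_tokens: int, pad_to_multiple_of: int) -> int:
    # Round up to the next multiple, e.g. 50257 -> 50304 when padding to a multiple of 128.
    return ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of

assert pad_vocab_to_multiple(50257, 128) == 50304
assert pad_vocab_to_multiple(50304, 128) == 50304  # already a multiple, left unchanged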
if new_num_tokens is None:
            return old_embeddings

        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You"
                " should either use a different resize function or make sure that `old_embeddings` is an instance of"
                f" {nn.Embedding}."
            )
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
# Build new embeddings

        # When using DeepSpeed ZeRO-3, we shouldn't create new embeddings with DeepSpeed init
        # because the shape of the new embedding layer is used across various modeling files
        # as well as to update config vocab size. Shape will be 0 when using DeepSpeed init leading
        # to errors when training.
        new_embeddings = nn.Embedding(
            new_num_tokens,
            old_embedding_dim,
            device=old_embeddings.weight.device,
            dtype=old_embeddings.weight.dtype,
        )

        if new_num_tokens > old_num_tokens and not mean_resizing:
            # initialize new embeddings (in particular added tokens) with a mean of 0 and std equals `config.initializer_range`.
            self._init_weights(new_embeddings)
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
elif new_num_tokens > old_num_tokens and mean_resizing:
            # initialize new embeddings (in particular added tokens). The new embeddings will be initialized
            # from a multivariate normal distribution that has the old embeddings' mean and covariance,
            # as described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html
            logger.warning_once(
                "The new embeddings will be initialized from a multivariate normal distribution that has old embeddings' mean and covariance. "
                "As described in this article: https://nlp.stanford.edu/~johnhew/vocab-expansion.html. "
                "To disable this, use `mean_resizing=False`"
            )

            added_num_tokens = new_num_tokens - old_num_tokens
            if is_deepspeed_zero3_enabled() and not is_quantized:
                import deepspeed
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
with deepspeed.zero.GatheredParameters([old_embeddings.weight], modifier_rank=None):
                    self._init_added_embeddings_weights_with_mean(
                        old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens
                    )
            else:
                self._init_added_embeddings_weights_with_mean(
                    old_embeddings, new_embeddings, old_embedding_dim, old_num_tokens, added_num_tokens
                )

        # Copy token embeddings from the previous weights

        # number of tokens to copy
        n = min(old_num_tokens, new_num_tokens)

        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
params = [old_embeddings.weight, new_embeddings.weight]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        # Replace weights in old_embeddings and return to maintain the same embedding type.
        # This ensures correct functionality when a Custom Embedding class is passed as input.
        # The input and output embedding types remain consistent. (c.f. https://github.com/huggingface/transformers/pull/31979)
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
params = [old_embeddings.weight, new_embeddings.weight]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                old_embeddings.weight = new_embeddings.weight
                old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]

                # If the new number of tokens is smaller than the original `padding_idx`, the `padding_idx`
                # will be set to `None` in the resized embeddings.
                if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
                    old_embeddings.padding_idx = None
        else:
            old_embeddings.weight.data = new_embeddings.weight.data
            old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]
            if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
                old_embeddings.padding_idx = None

        return old_embeddings
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
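The ZeRO-3 branches above all follow the same gather-then-modify pattern. Here is a minimal, hypothetical sketch of that pattern, assuming a DeepSpeed ZeRO-3 run in which the embedding weights are partitioned; only `deepspeed.zero.GatheredParameters` with `modifier_rank=0` is taken from the code above:

import deepspeed
import torch.nn as nn

def copy_overlapping_rows(old_emb: nn.Embedding, new_emb: nn.Embedding) -> None:
    n = min(old_emb.num_embeddings, new_emb.num_embeddings)
    params = [old_emb.weight, new_emb.weight]
    # Temporarily gather the full (unpartitioned) weights; with modifier_rank=0
    # only rank 0 modifies them, and the update is re-partitioned on exit.
    with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
        new_emb.weight.data[:n, :] = old_emb.weight.data[:n, :]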
def _get_resized_lm_head(
        self,
        old_lm_head: nn.Linear,
        new_num_tokens: Optional[int] = None,
        transposed: Optional[bool] = False,
        mean_resizing: bool = True,
    ) -> nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_lm_head (`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the linear matrix.
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
                `torch.nn.Linear` module of the model without doing anything.
            transposed (`bool`, *optional*, defaults to `False`):
                Whether `old_lm_head` is transposed or not. If `True`, `old_lm_head.size()` is `lm_head_dim,
                vocab_size`, else `vocab_size, lm_head_dim`.
            mean_resizing (`bool`):
                Whether to initialize the added embeddings from a multivariate normal distribution that has the old
                embeddings' mean and covariance, or to initialize them with a normal distribution that has a mean of
                zero and a std equal to `config.initializer_range`.
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal
                language models, where the generated tokens' probabilities will not be affected by the added
                embeddings: initializing the new embeddings with the old embeddings' mean reduces the KL divergence
                between the next-token probabilities before and after adding the new embeddings. Refer to this article
                for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is
            `None`
        """
        if new_num_tokens is None:
            return old_lm_head

        is_quantized = hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
        if is_deepspeed_zero3_enabled() and not is_quantized:
            import deepspeed
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
                old_num_tokens, old_lm_head_dim = (
                    old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
                )
        else:
            old_num_tokens, old_lm_head_dim = (
                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
            )

        if old_num_tokens == new_num_tokens and not is_deepspeed_zero3_enabled():
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You"
                " should either use a different resize function or make sure that `old_lm_head` is an instance of"
                f" {nn.Linear}."
            )
230
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
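As a small illustration of the `transposed` flag handled above (a hypothetical standalone helper; the method itself reads these sizes inline via `old_lm_head.weight.size()` / `.t().size()`):

from typing import Tuple

import torch.nn as nn

def lm_head_shape(old_lm_head: nn.Linear, transposed: bool) -> Tuple[int, int]:
    # nn.Linear stores its weight as (out_features, in_features), i.e. (vocab_size, lm_head_dim)
    # for a regular lm head. With transposed=True the stored layout is (lm_head_dim, vocab_size),
    # so the transpose is read to recover (vocab_size, lm_head_dim) either way.
    weight = old_lm_head.weight if not transposed else old_lm_head.weight.t()
    vocab_size, lm_head_dim = weight.size()
    return vocab_size, lm_head_dim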